hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
6d1c8559c9e8efb26dafb2b74943b2fb48041af0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
#include <pthread.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
__device__ int is_a_match (char *attempt) {
char plain_password1[] = "SH1234";
char plain_password2[] = "RE2345";
char plain_password3[] = "EJ3456";
char plain_password4[] = "AN4567";
char *a = attempt;
char *b = attempt;
char *c = attempt;
char *d = attempt;
char *p1 = plain_password1;
char *p2 = plain_password2;
char *p3 = plain_password3;
char *p4 = plain_password4;
while (*a == *p1) {
if (*a == '\0')
{
printf ("Password: %s\n", plain_password1);
break;
}
a++;
p1++;
}
while(*b == *p2) {
if(*b == '\0')
{
printf("Password: %s\n",plain_password2);
break;
}
b++;
p2++;
}
while(*c == *p3) {
if(*c == '\0')
{
printf("Password: %s\n",plain_password3);
break;
}
c++;
p3++;
}
while (*d == *p4) {
if (*d == '\0')
{
printf ("Password: %s\n", plain_password4);
return 1;
}
d++;
p4++;
}
return 0;
}
__global__ void kernel () {
char i1, i2, i3, i4;
char password [7];
password [6] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstMatch = i;
char secondMatch = j;
password [0] = firstMatch;
password [1] = secondMatch;
for (i1='0'; i1<='9'; i1++) {
for (i2='0'; i2<='9'; i2++) {
for (i3='0'; i3<='9'; i3++) {
for (i4='0'; i4<='9'; i4++) {
password [2] = i1;
password [3] = i2;
password [4] = i3;
password [5] = i4;
if(is_a_match(password)) {
}
else {
//printf ("tried: %s\n", password);
}
}
}
}
}
}
int time_difference (struct timespec *start, struct timespec *finish, long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if (dn < 0) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return! (*difference > 0);
}
int main () {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
pthread_mutex_lock(&mutex);
hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, );
hipDeviceSynchronize();
pthread_mutex_unlock(&mutex);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
| 6d1c8559c9e8efb26dafb2b74943b2fb48041af0.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
#include <pthread.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
__device__ int is_a_match (char *attempt) {
char plain_password1[] = "SH1234";
char plain_password2[] = "RE2345";
char plain_password3[] = "EJ3456";
char plain_password4[] = "AN4567";
char *a = attempt;
char *b = attempt;
char *c = attempt;
char *d = attempt;
char *p1 = plain_password1;
char *p2 = plain_password2;
char *p3 = plain_password3;
char *p4 = plain_password4;
while (*a == *p1) {
if (*a == '\0')
{
printf ("Password: %s\n", plain_password1);
break;
}
a++;
p1++;
}
while(*b == *p2) {
if(*b == '\0')
{
printf("Password: %s\n",plain_password2);
break;
}
b++;
p2++;
}
while(*c == *p3) {
if(*c == '\0')
{
printf("Password: %s\n",plain_password3);
break;
}
c++;
p3++;
}
while (*d == *p4) {
if (*d == '\0')
{
printf ("Password: %s\n", plain_password4);
return 1;
}
d++;
p4++;
}
return 0;
}
__global__ void kernel () {
char i1, i2, i3, i4;
char password [7];
password [6] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstMatch = i;
char secondMatch = j;
password [0] = firstMatch;
password [1] = secondMatch;
for (i1='0'; i1<='9'; i1++) {
for (i2='0'; i2<='9'; i2++) {
for (i3='0'; i3<='9'; i3++) {
for (i4='0'; i4<='9'; i4++) {
password [2] = i1;
password [3] = i2;
password [4] = i3;
password [5] = i4;
if(is_a_match(password)) {
}
else {
//printf ("tried: %s\n", password);
}
}
}
}
}
}
int time_difference (struct timespec *start, struct timespec *finish, long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if (dn < 0) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return! (*difference > 0);
}
int main () {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
pthread_mutex_lock(&mutex);
kernel <<<26,26>>>();
cudaThreadSynchronize();
pthread_mutex_unlock(&mutex);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
d10d3e1a6d40e423061f6f1d36045f25b4c9ff7f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include <hip/hip_runtime_api.h>
#include "cusolverSp_LOWLEVEL_PREVIEW.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define INPUTDENSEA prhs[0]
#define INPUTDENSEB prhs[1]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=2)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(INPUTDENSEA);
if ((mxIsChar(INPUTDENSEA))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(INPUTDENSEB);
if ((mxIsChar(INPUTDENSEB))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTDENSEB)) {
mxGPUArray const *INPUTDENSEGPUA;
mxGPUArray const *INPUTDENSEGPUB;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA);
INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB);
if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){
const mwSize *dimsGPUSA;
dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA);
int numARows, numAColumns;
numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */
numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */
const mwSize *dimsGPUSB;
dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB);
int numBRows, numBColumns;
numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */
numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
hipDoubleComplex const *d_A_dense;
d_A_dense = (hipDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA));
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; //gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect1[1] = {numARows};
mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1);
//hipDoubleComplex *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
//gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice));
cusparseSafeCall(hipsparseZnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
//hipDoubleComplex *d_A; //gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; //gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; //gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(hipsparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(PerVect1);
hipDoubleComplex const *d_B_dense;
d_B_dense = (hipDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB));
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
hipDoubleComplex *VALOUT = (hipDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(hipFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
hipsparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
hipsparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTDENSEB))){
// if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be hipDoubleComplex precision.");
// }
if((!mxIsSparse(INPUTDENSEA))&& (!mxIsSparse(INPUTDENSEB)) ){
mxInitGPU();
const mwSize *dimsCPUA;
dimsCPUA=mxGetDimensions(INPUTDENSEA);
int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */
int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */
const mwSize *dimsCPUB;
dimsCPUB=mxGetDimensions(INPUTDENSEB);
int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */
int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
hipDoubleComplex *h_A_dense1;
h_A_dense1 = (hipDoubleComplex *)mxGetComplexDoubles(INPUTDENSEA);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect1[1] = {numARows};
mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1);
//hipDoubleComplex *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns};
mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A_dense = (hipDoubleComplex *)mxGPUGetData(OUTMA);
gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice));
cusparseSafeCall(hipsparseZnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
//hipDoubleComplex *d_A; gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(hipsparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(OUTMA);
mxGPUDestroyGPUArray(PerVect1);
hipDoubleComplex *h_B_dense1;
h_B_dense1 = (hipDoubleComplex *)mxGetComplexDoubles(INPUTDENSEB);
size_t pivot_dimensionsvalueDB[1] = {numBRows};
mxGPUArray *OUTMB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueDB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_B_dense = (hipDoubleComplex *)mxGPUGetData(OUTMB);
gpuErrchk(hipMemcpy(d_B_dense, h_B_dense1, numBRows * sizeof(*d_B_dense), hipMemcpyHostToDevice));
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
hipDoubleComplex *VALOUT = (hipDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(OUTMB);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(hipFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
hipsparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
hipsparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
| d10d3e1a6d40e423061f6f1d36045f25b4c9ff7f.cu |
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include <cuda_runtime_api.h>
#include "cusolverSp_LOWLEVEL_PREVIEW.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define INPUTDENSEA prhs[0]
#define INPUTDENSEB prhs[1]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=2)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(INPUTDENSEA);
if ((mxIsChar(INPUTDENSEA))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(INPUTDENSEB);
if ((mxIsChar(INPUTDENSEB))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTDENSEB)) {
mxGPUArray const *INPUTDENSEGPUA;
mxGPUArray const *INPUTDENSEGPUB;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA);
INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB);
if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){
const mwSize *dimsGPUSA;
dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA);
int numARows, numAColumns;
numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */
numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */
const mwSize *dimsGPUSB;
dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB);
int numBRows, numBColumns;
numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */
numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
cuDoubleComplex const *d_A_dense;
d_A_dense = (cuDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA));
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; //gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect1[1] = {numARows};
mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1);
//cuDoubleComplex *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
//gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
cusparseSafeCall(cusparseZnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
//cuDoubleComplex *d_A; //gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; //gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; //gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(cusparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(PerVect1);
cuDoubleComplex const *d_B_dense;
d_B_dense = (cuDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB));
mxGPUDestroyGPUArray(INPUTDENSEGPUB);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
cuDoubleComplex *VALOUT = (cuDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(cudaFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
cusparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
cusparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTDENSEB))){
// if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be cuDoubleComplex precision.");
// }
if((!mxIsSparse(INPUTDENSEA))&& (!mxIsSparse(INPUTDENSEB)) ){
mxInitGPU();
const mwSize *dimsCPUA;
dimsCPUA=mxGetDimensions(INPUTDENSEA);
int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */
int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */
const mwSize *dimsCPUB;
dimsCPUB=mxGetDimensions(INPUTDENSEB);
int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */
int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
cuDoubleComplex *h_A_dense1;
h_A_dense1 = (cuDoubleComplex *)mxGetComplexDoubles(INPUTDENSEA);
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect1[1] = {numARows};
mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1);
//cuDoubleComplex *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns};
mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A_dense = (cuDoubleComplex *)mxGPUGetData(OUTMA);
gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
cusparseSafeCall(cusparseZnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
//cuDoubleComplex *d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(cusparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(OUTMA);
mxGPUDestroyGPUArray(PerVect1);
cuDoubleComplex *h_B_dense1;
h_B_dense1 = (cuDoubleComplex *)mxGetComplexDoubles(INPUTDENSEB);
size_t pivot_dimensionsvalueDB[1] = {numBRows};
mxGPUArray *OUTMB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueDB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_B_dense = (cuDoubleComplex *)mxGPUGetData(OUTMB);
gpuErrchk(cudaMemcpy(d_B_dense, h_B_dense1, numBRows * sizeof(*d_B_dense), cudaMemcpyHostToDevice));
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
cuDoubleComplex *VALOUT = (cuDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(OUTMB);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(cudaFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
cusparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
cusparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
|
6b7e0af14f7dc5b1bfa3cb7143d6f7d8dacda043.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
nvcc StarBranchRun.cu -o StarBranchRun.exe -lglut -lGL -lGLU -lm
nvcc StarBranchRun.cu -o StarBranchRun.exe -lglut -lGL -lGLU -lm --use_fast_math
*/
#include "../CommonCompileFiles/binaryStarCommonIncludes.h"
#include "../CommonCompileFiles/binaryStarCommonDefines.h"
#include "../CommonCompileFiles/binaryStarCommonGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonFunctions.h"
#include "../CommonCompileFiles/binaryStarCommonRunGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonRunFunctions.h"
//File to hold the branch run parameters.
//FILE *StartPosVelForceFile;
FILE *BranchRunParameters;
//Globals read in from the BranchSetup file.
float BranchRunTime;
float GrowthStartTimeStar1, GrowthStopTimeStar1, PercentForceIncreaseStar1;
float GrowthStartTimeStar2, GrowthStopTimeStar2, PercentForceIncreaseStar2;
float4 InitailPosStar1, InitailVelStar1;
float4 InitailPosStar2, InitailVelStar2;
//--------------------------------------------------------------------------
// Copies the file at sourcePath into the current working directory under
// destName. missingName is the path shown in the error message when the
// source file is absent. Fixes the original's memory leak: the copy buffer
// was malloc'd once per file but freed only once at the end, leaking the
// first two buffers; here each buffer is freed as soon as its copy is done.
static void copyFileIntoBranchFolder(const char *sourcePath, const char *destName, const char *missingName)
{
	FILE *fileIn = fopen(sourcePath, "rb");
	if(fileIn == NULL)
	{
		printf("\n\n The %s file does not exist\n\n", missingName);
		exit(0);
	}
	fseek(fileIn, 0, SEEK_END);
	long sizeOfFile = ftell(fileIn);
	rewind(fileIn);
	char *buffer = (char*)malloc(sizeof(char)*sizeOfFile);
	if(buffer == NULL)
	{
		printf("\n\n Out of memory while copying the %s file\n\n", missingName);
		exit(0);
	}
	fread(buffer, 1, sizeOfFile, fileIn);
	FILE *fileOut = fopen(destName, "wb");
	fwrite(buffer, 1, sizeOfFile, fileOut);
	fclose(fileIn);
	fclose(fileOut);
	free(buffer);
}
//--------------------------------------------------------------------------
// Creates a date/time-stamped output folder for this branch run, snapshots
// the files that define the run into it (BranchSetup, ContinueRun,
// RunParameters), and opens the PosAndVel and BranchRunParameters output
// files. Leaves the process chdir'd into the new folder.
void createAndLoadFolderForNewBranchRun()
{
	// Build a folder name like "BranchRun:M-D-HH:MM" from the local time.
	time_t t = time(0);
	struct tm * now = localtime( & t );
	int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min;
	stringstream smonth, sday, stimeHour, stimeMin;
	smonth << month;
	sday << day;
	stimeHour << curTimeHour;
	stimeMin << curTimeMin;
	string monthday;
	// Zero-pad the minutes so e.g. five past prints as ":05" not ":5".
	if (curTimeMin <= 9) monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str();
	else monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str();
	string foldernametemp = "BranchRun:" + monthday;
	const char *branchFolderName = foldernametemp.c_str();
	mkdir(branchFolderName , S_IRWXU|S_IRWXG|S_IRWXO);
	// Work inside the new folder and copy in the files that contributed to
	// making this branch run, so the run is reproducible later.
	chdir(branchFolderName);
	copyFileIntoBranchFolder("../BranchSetup", "BranchSetup", "BranchSetup");
	copyFileIntoBranchFolder("../ContinueFiles/ContinueRun", "ContinueRun", "ContinueFiles/ContinueRun");
	system("chmod 755 ./ContinueRun");
	copyFileIntoBranchFolder("../FilesFromBuild/RunParameters", "RunParameters", "FilesFromBuild/RunParameters");
	// Positions-and-velocities dump used later to make movies.
	PosAndVelFile = fopen("PosAndVel", "wb");
	// Branch run parameters; written and closed in readAndSetBranchParameters().
	BranchRunParameters = fopen("BranchRunParameters", "wb");
}
//--------------------------------------------------------------------------
// Reads the unit converters, element counts, and push-back reductions that
// the build program wrote into the RunParameters file (one "name = value"
// per line), then derives the total element count. Exits on a missing file.
void readAndSetRunParameters()
{
	ifstream data;
	string label;
	data.open("RunParameters");
	if(data.is_open() != 1)
	{
		printf("\nTSU Error could not open RunParameters file\n");
		exit(0);
	}
	// Discard everything up to '=' then stream the value. The call order
	// below must match the line order of the file.
	auto readValue = [&data, &label](auto &destination)
	{
		getline(data, label, '=');
		data >> destination;
	};
	readValue(SystemLengthConverterToKilometers);
	readValue(SystemMassConverterToKilograms);
	readValue(SystemTimeConverterToSeconds);
	readValue(NumberElementsStar1);
	readValue(NumberElementsStar2);
	readValue(CorePushBackReduction);
	readValue(PlasmaPushBackReduction);
	data.close();
	NumberElements = NumberElementsStar1 + NumberElementsStar2;
}
//--------------------------------------------------------------------------
// Reads the branch-specific settings from the BranchSetup file (starting
// positions/velocities for both stars, run length, each star's growth
// window and force increase, and the record/draw rates), converts them
// from kilometers / km-per-second / days into simulation units, validates
// that the run outlasts both growth windows, and records the rates into
// the BranchRunParameters file (which it closes).
void readAndSetBranchParameters()
{
	ifstream data;
	string label;
	data.open("BranchSetup");
	if(data.is_open() != 1)
	{
		printf("\nTSU Error could not open BranchSetup file\n");
		exit(0);
	}
	// Each line is "name = value": discard up to '=' then stream the value.
	// The call order below must match the line order of the file.
	auto readValue = [&data, &label](auto &destination)
	{
		getline(data, label, '=');
		data >> destination;
	};
	readValue(InitailPosStar1.x);
	readValue(InitailPosStar1.y);
	readValue(InitailPosStar1.z);
	readValue(InitailPosStar2.x);
	readValue(InitailPosStar2.y);
	readValue(InitailPosStar2.z);
	readValue(InitailVelStar1.x);
	readValue(InitailVelStar1.y);
	readValue(InitailVelStar1.z);
	readValue(InitailVelStar2.x);
	readValue(InitailVelStar2.y);
	readValue(InitailVelStar2.z);
	readValue(BranchRunTime);
	readValue(GrowthStartTimeStar1);
	readValue(GrowthStopTimeStar1);
	readValue(PercentForceIncreaseStar1);
	readValue(GrowthStartTimeStar2);
	readValue(GrowthStopTimeStar2);
	readValue(PercentForceIncreaseStar2);
	readValue(RecordRate);
	readValue(DrawRate);
	data.close();
	// Positions: kilometers -> system length units.
	InitailPosStar1.x /= SystemLengthConverterToKilometers;
	InitailPosStar1.y /= SystemLengthConverterToKilometers;
	InitailPosStar1.z /= SystemLengthConverterToKilometers;
	InitailPosStar2.x /= SystemLengthConverterToKilometers;
	InitailPosStar2.y /= SystemLengthConverterToKilometers;
	InitailPosStar2.z /= SystemLengthConverterToKilometers;
	// Velocities: km/s -> system length unit per system time unit.
	InitailVelStar1.x /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar1.y /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar1.z /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar2.x /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar2.y /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar2.z /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	// Times: days -> system time units.
	BranchRunTime *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStartTimeStar1 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStopTimeStar1 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStartTimeStar2 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStopTimeStar2 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	// Both growth windows must close before the run ends.
	if(BranchRunTime < GrowthStopTimeStar1)
	{
		printf("\nTSU Error: BranchRunTime is less than GrowthStopTimeStar1.\n");
		exit(0);
	}
	if(BranchRunTime < GrowthStopTimeStar2)
	{
		printf("\nTSU Error: BranchRunTime is less than GrowthStopTimeStar2.\n");
		exit(0);
	}
	// Recording info into the BranchRunParameters file.
	fprintf(BranchRunParameters, "\n RecordRate = %d", RecordRate);
	fprintf(BranchRunParameters, "\n DrawRate = %d", DrawRate);
	fclose(BranchRunParameters);
}
//--------------------------------------------------------------------------
// Loads the snapshot written by the build program: the start time, then
// position, velocity and force for every element into the host arrays.
// Exits with a message if the file is missing or truncated.
void readInTheInitialsStars()
{
	FILE *startFile = fopen("../FilesFromBuild/StartPosVelForce","rb");
	if(startFile == NULL)
	{
		printf("\n\n The StartPosVelForce file does not exist\n\n");
		exit(0);
	}
	// The original ignored the fread return values, so a truncated file
	// would silently leave the arrays partly uninitialized. Check every
	// read count and bail out on a short read.
	size_t count = (size_t)NumberElements;
	if(fread(&StartTime, sizeof(float), 1, startFile) != 1
	|| fread(PosCPU, sizeof(float4), NumberElements, startFile) != count
	|| fread(VelCPU, sizeof(float4), NumberElements, startFile) != count
	|| fread(ForceCPU, sizeof(float4), NumberElements, startFile) != count)
	{
		printf("\n\n The StartPosVelForce file is incomplete or corrupt\n\n");
		exit(0);
	}
	fclose(startFile);
}
//--------------------------------------------------------------------------
// Shifts every element of each star by that star's requested starting
// position and adds the star's bulk starting velocity, then recenters the
// view on the combined center of mass. Star 1 owns elements
// [0, NumberElementsStar1), star 2 owns the rest.
void setInitialConditions()
{
	for(int i = 0; i < NumberElements; i++)
	{
		bool inStar1 = (i < NumberElementsStar1);
		const float4 &posShift = inStar1 ? InitailPosStar1 : InitailPosStar2;
		const float4 &velShift = inStar1 ? InitailVelStar1 : InitailVelStar2;
		PosCPU[i].x += posShift.x;
		PosCPU[i].y += posShift.y;
		PosCPU[i].z += posShift.z;
		VelCPU[i].x += velShift.x;
		VelCPU[i].y += velShift.y;
		VelCPU[i].z += velShift.z;
	}
	CenterOfView = getCenterOfMass();
}
//--------------------------------------------------------------------------
// Computes the total force on every element with a tiled all-pairs sweep.
// One thread per element; tiles of BLOCKSIZE elements are staged through
// shared memory while looping over every block on every GPU. Element 0 and
// element numberElementsStar1 are the two star cores; all other elements
// are plasma, and core/core, core/plasma and plasma/plasma pairs use
// different force laws (see the calculate*Force helpers). After the sweep
// each plasma element's vel.w is bumped by its star's pressureIncrease
// (the cores, ids 0 and numberElementsStar1, are deliberately excluded).
// NOTE(review): the out-of-bounds check below only prints - it does NOT
// return - so the launch configuration must exactly cover
// numberOfElements/gPUsUsed elements per GPU, or the kernel reads and
// writes past the arrays. A plain early return here would desynchronize
// the __syncthreads() barriers, so verify the launch math in deviceSetup()
// instead of adding one.
__global__ void getForces(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int numberOfElements, float pressureIncrease1, float pressureIncrease2, float corePushBackReduction, float plasmaPushBackReduction, int gPUNumber, int gPUsUsed)
{
int id, ids, i, j, k;
float4 posMe, velMe, forceMe;
float4 partialForce;
// Accumulate in double to reduce rounding error over many pair sums.
double forceSumX, forceSumY, forceSumZ;
__shared__ float4 shPos[BLOCKSIZE];
__shared__ float4 shVel[BLOCKSIZE];
__shared__ float4 shForce[BLOCKSIZE];
// Global element index: this GPU's slice starts at blockDim.x*gridDim.x*gPUNumber.
id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
if(numberOfElements <= id)
{
printf("\n TSU error: id out of bounds in getForces. \n");
}
forceSumX = 0.0;
forceSumY = 0.0;
forceSumZ = 0.0;
// Cache this thread's element in registers for the whole sweep.
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
velMe.x = vel[id].x;
velMe.y = vel[id].y;
velMe.z = vel[id].z;
velMe.w = vel[id].w;
forceMe.x = force[id].x;
forceMe.y = force[id].y;
forceMe.z = force[id].z;
forceMe.w = force[id].w;
// Sweep every tile on every GPU's slice (k = GPU, j = block within slice).
for(k =0; k < gPUsUsed; k++)
{
for(j = 0; j < gridDim.x; j++)
{
// Stage the next tile of elements into shared memory.
shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
// Barrier: tile must be fully loaded before anyone reads it.
__syncthreads();
#pragma unroll 32
for(i = 0; i < blockDim.x; i++)
{
ids = i + blockDim.x*j + blockDim.x*gridDim.x*k;
if(id != ids)
{
// Pick the force law from which of the pair (if either) is a core.
if(id == 0 && ids == numberElementsStar1)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == numberElementsStar1 && ids == 0)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == 0 || id == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(ids == 0 || ids == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else
{
partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPushBackReduction);
}
forceSumX += partialForce.x;
forceSumY += partialForce.y;
forceSumZ += partialForce.z;
}
}
// Barrier: finish reading this tile before the next one overwrites it.
__syncthreads();
}
}
force[id].x = (float)forceSumX;
force[id].y = (float)forceSumY;
force[id].z = (float)forceSumZ;
// Grow the plasma push-back term; cores (id 0 and numberElementsStar1)
// are intentionally skipped by these strict inequalities.
if(0 < id && id < numberElementsStar1)
{
vel[id].w += pressureIncrease1;
}
else if(numberElementsStar1 < id)
{
vel[id].w += pressureIncrease2;
}
}
//--------------------------------------------------------------------------
// One thread per element: kick the velocity by (F / pos.w) * dt (pos.w is
// the divisor - presumably the element's mass; confirm against the build
// program), then drift the position by the new velocity. No bounds guard:
// the launch is assumed to exactly cover this GPU's slice of the elements,
// matching the rest of this file.
__global__ void moveBodies(float4 *pos, float4 *vel, float4 *force, float dt, int gPUNumber)
{
	int element = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
	vel[element].x += (force[element].x/pos[element].w)*dt;
	vel[element].y += (force[element].y/pos[element].w)*dt;
	vel[element].z += (force[element].z/pos[element].w)*dt;
	pos[element].x += vel[element].x*dt;
	pos[element].y += vel[element].y*dt;
	pos[element].z += vel[element].z*dt;
}
//--------------------------------------------------------------------------
// Advances the N-body system from `time` until `runTime` in fixed steps of
// dt across gPUsUsed GPUs. Each step: every GPU computes forces and moves
// its slice of elements, then the slices are exchanged device-to-device so
// every GPU holds the full state for the next force pass. Draws every
// DrawRate steps and records positions/velocities every RecordRate steps.
// Returns the simulation time of the last completed step.
// NOTE(review): offSet = NumberElements/gPUsUsed assumes NumberElements is
// divisible by gPUsUsed - confirm deviceSetup() guarantees this.
float starNbody(float time, float runTime, float dt, int gPUsUsed)
{
int tDraw = 0;
int tRecord = 0;
float pressureIncrease1, pressureIncrease2;
// Number of elements owned by each GPU.
int offSet = NumberElements/gPUsUsed;
// NOTE(review): the first pass through the loop launches getForces with
// pressureIncrease = 1.0f before the growth-window logic below has run -
// confirm this initial vel.w bump is intended.
pressureIncrease1 = 1.0f;
pressureIncrease2 = 1.0f;
while(time < runTime)
{
//Getting forces on each GPU's slice.
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
hipLaunchKernelGGL(( getForces), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, pressureIncrease1, pressureIncrease2, CorePushBackReduction, PlasmaPushBackReduction, i, gPUsUsed);
errorCheck("getForces");
}
//Moving elements on each GPU's slice.
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
hipLaunchKernelGGL(( moveBodies), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], dt, i);
errorCheck("moveBodies");
}
// Wait for all kernels before exchanging slices between GPUs.
hipDeviceSynchronize();
errorCheck("hipDeviceSynchronize");
//Sharing memory: broadcast GPU i's updated slice to every other GPU.
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
for(int j = 0; j < gPUsUsed; j++)
{
if(i != j)
{
//printf(" pos i = %d j = %d\n", i, j);
//hipMemcpyAsync(PosGPU[j] + (i*offSet)*sizeof(float4), PosGPU[i] + (i*offSet)*sizeof(float4), (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
hipMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
errorCheck("hipMemcpy Pos A");
//printf(" vel i = %d j = %d\n", i, j);
//hipMemcpyAsync(VelGPU[j] + (i*offSet)*sizeof(float4), VelGPU[i] + (i*offSet)*sizeof(float4), (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
hipMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
errorCheck("hipMemcpy Vel");
}
}
}
// Wait for the async copies so every GPU has the full state.
hipDeviceSynchronize();
errorCheck("hipDeviceSynchronize");
//Increasing the plasma elements push back. I had to start a dt forward so I could get the blocks to sync.
if((GrowthStartTimeStar1 - dt) < time && time < (GrowthStopTimeStar1 - dt))
{
pressureIncrease1 = PercentForceIncreaseStar1;
}
else
{
pressureIncrease1 = 0.0f;
}
if((GrowthStartTimeStar2 - dt) < time && time < (GrowthStopTimeStar2 - dt))
{
pressureIncrease2 = PercentForceIncreaseStar2;
}
else
{
pressureIncrease2 = 0.0f;
}
if(tDraw == DrawRate)
{
//Because it is shared above it will only need to be copied from one GPU.
hipSetDevice(0);
errorCheck("hipSetDevice");
hipMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Pos draw");
drawPicture();
tDraw = 0;
printf("\n Time in days = %f", time*SystemTimeConverterToSeconds/(60.0*60.0*24.0));
}
if(tRecord == RecordRate)
{
//Because it is shared above it will only need to be copied from one GPU.
hipSetDevice(0);
errorCheck("hipSetDevice");
hipMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Pos record");
hipMemcpy(VelCPU, VelGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Vel record");
recordPosAndVel(time);
tRecord = 0;
}
tDraw++;
tRecord++;
time += dt;
}
// The loop leaves `time` one step past the last update; report the time
// of the last completed step.
return(time - dt);
}
//--------------------------------------------------------------------------
// GLUT idle callback that drives the whole branch run: sets up signal
// handling, creates the branch folder, reads parameters, loads the stars,
// runs the multi-GPU simulation, saves the final state, cleans up, prints
// the wall-clock time, and exits the process.
void control()
{
struct sigaction sa;
float time = StartTime;
int gPUsUsed;
clock_t startTimer, endTimer;
//Starting the timer.
startTimer = clock();
// Handling input from the screen.
sa.sa_handler = signalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler
if (sigaction(SIGINT, &sa, NULL) == -1)
{
printf("\nTSU Error: sigaction error\n");
}
// Creating branch folder and copying in all the files that contributed to making the branch run.
printf("\n Creating and loading folder for the branch run.\n");
createAndLoadFolderForNewBranchRun();
// Reading in the build parameters.
printf("\n Reading and setting the run parameters.\n");
readAndSetRunParameters();
// Reading in the branch parameters.
printf("\n Reading and setting the branch parameters.\n");
readAndSetBranchParameters();
// Allocating memory for CPU and GPU.
printf("\n Allocating memory on the GPU and CPU and opening positions and velocities file.\n");
allocateCPUMemory();
// Reading in the raw stars generated by the build program.
printf("\n Reading in the stars that were generated in the build program.\n");
readInTheInitialsStars();
// Setting initial conditions (positions/velocities offsets per star).
printf("\n Setting initial conditions for the branch run.\n");
setInitialConditions();
// Draw the initial configuration.
printf("\n Drawing initial picture.\n");
drawPicture();
// Setting up the GPUs.
printf("\n Setting up the GPU.\n");
gPUsUsed = deviceSetup();
// Running the simulation.
printf("\n Running the simulation.\n");
copyStarsUpToGPU(gPUsUsed);
time = starNbody(time, BranchRunTime, DT, gPUsUsed);
// Saving the run's final positions and velocities.
printf("\n Saving the the runs final positions and velosities.\n");
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
// Saving any wanted stats about the run that you may want. I don't have anything to record as of yet.
printf("\n Saving any wanted stats about the run that you may want.\n");
//recordStarStats();
// Freeing memory.
printf("\n Cleaning up the run.\n");
cleanUp(gPUsUsed);
fclose(PosAndVelFile);
// Stopping timer and printing out run time.
endTimer = clock();
int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC;
int hours = seconds/3600;
int minutes = (seconds - hours*3600)/60;
seconds = seconds - hours*3600 - minutes*60;
printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds);
printf("\n The run has finished successfully \n\n");
exit(0);
}
//--------------------------------------------------------------------------
// Sets up the GLUT window and OpenGL state, then hands the whole run over
// to control() via the idle callback; glutMainLoop() never returns (the
// run exits from inside control()).
int main(int argc, char** argv)
{
	int xWindowSize = 2500;
	int yWindowSize = 2500;
	glutInit(&argc,argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
	glutInitWindowSize(xWindowSize,yWindowSize);
	glutInitWindowPosition(0,0);
	glutCreateWindow("Creating Stars");
	init();
	glShadeModel(GL_SMOOTH);
	glClearColor(0.0, 0.0, 0.0, 0.0);
	glutDisplayFunc(Display);
	// Registered once; the original registered reshape twice, which was
	// redundant (the second call just overwrote the first identically).
	glutReshapeFunc(reshape);
	glutIdleFunc(control);
	glutMainLoop();
	return 0;
}
| 6b7e0af14f7dc5b1bfa3cb7143d6f7d8dacda043.cu | /*
nvcc StarBranchRun.cu -o StarBranchRun.exe -lglut -lGL -lGLU -lm
nvcc StarBranchRun.cu -o StarBranchRun.exe -lglut -lGL -lGLU -lm --use_fast_math
*/
#include "../CommonCompileFiles/binaryStarCommonIncludes.h"
#include "../CommonCompileFiles/binaryStarCommonDefines.h"
#include "../CommonCompileFiles/binaryStarCommonGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonFunctions.h"
#include "../CommonCompileFiles/binaryStarCommonRunGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonRunFunctions.h"
//File to hold the branch run parameters.
//FILE *StartPosVelForceFile;
FILE *BranchRunParameters;
//Globals read in from the BranchSetup file.
float BranchRunTime;
float GrowthStartTimeStar1, GrowthStopTimeStar1, PercentForceIncreaseStar1;
float GrowthStartTimeStar2, GrowthStopTimeStar2, PercentForceIncreaseStar2;
float4 InitailPosStar1, InitailVelStar1;
float4 InitailPosStar2, InitailVelStar2;
//--------------------------------------------------------------------------
// Copies the file at sourcePath into the current working directory under
// destName. missingName is the path shown in the error message when the
// source file is absent. Fixes the original's memory leak: the copy buffer
// was malloc'd once per file but freed only once at the end, leaking the
// first two buffers; here each buffer is freed as soon as its copy is done.
static void copyFileIntoBranchFolder(const char *sourcePath, const char *destName, const char *missingName)
{
	FILE *fileIn = fopen(sourcePath, "rb");
	if(fileIn == NULL)
	{
		printf("\n\n The %s file does not exist\n\n", missingName);
		exit(0);
	}
	fseek(fileIn, 0, SEEK_END);
	long sizeOfFile = ftell(fileIn);
	rewind(fileIn);
	char *buffer = (char*)malloc(sizeof(char)*sizeOfFile);
	if(buffer == NULL)
	{
		printf("\n\n Out of memory while copying the %s file\n\n", missingName);
		exit(0);
	}
	fread(buffer, 1, sizeOfFile, fileIn);
	FILE *fileOut = fopen(destName, "wb");
	fwrite(buffer, 1, sizeOfFile, fileOut);
	fclose(fileIn);
	fclose(fileOut);
	free(buffer);
}
//--------------------------------------------------------------------------
// Creates a date/time-stamped output folder for this branch run, snapshots
// the files that define the run into it (BranchSetup, ContinueRun,
// RunParameters), and opens the PosAndVel and BranchRunParameters output
// files. Leaves the process chdir'd into the new folder.
void createAndLoadFolderForNewBranchRun()
{
	// Build a folder name like "BranchRun:M-D-HH:MM" from the local time.
	time_t t = time(0);
	struct tm * now = localtime( & t );
	int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min;
	stringstream smonth, sday, stimeHour, stimeMin;
	smonth << month;
	sday << day;
	stimeHour << curTimeHour;
	stimeMin << curTimeMin;
	string monthday;
	// Zero-pad the minutes so e.g. five past prints as ":05" not ":5".
	if (curTimeMin <= 9) monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str();
	else monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str();
	string foldernametemp = "BranchRun:" + monthday;
	const char *branchFolderName = foldernametemp.c_str();
	mkdir(branchFolderName , S_IRWXU|S_IRWXG|S_IRWXO);
	// Work inside the new folder and copy in the files that contributed to
	// making this branch run, so the run is reproducible later.
	chdir(branchFolderName);
	copyFileIntoBranchFolder("../BranchSetup", "BranchSetup", "BranchSetup");
	copyFileIntoBranchFolder("../ContinueFiles/ContinueRun", "ContinueRun", "ContinueFiles/ContinueRun");
	system("chmod 755 ./ContinueRun");
	copyFileIntoBranchFolder("../FilesFromBuild/RunParameters", "RunParameters", "FilesFromBuild/RunParameters");
	// Positions-and-velocities dump used later to make movies.
	PosAndVelFile = fopen("PosAndVel", "wb");
	// Branch run parameters; written and closed in readAndSetBranchParameters().
	BranchRunParameters = fopen("BranchRunParameters", "wb");
}
//--------------------------------------------------------------------------
// Reads the unit converters, element counts, and push-back reductions that
// the build program wrote into the RunParameters file (one "name = value"
// per line), then derives the total element count. Exits on a missing file.
void readAndSetRunParameters()
{
	ifstream data;
	string label;
	data.open("RunParameters");
	if(data.is_open() != 1)
	{
		printf("\nTSU Error could not open RunParameters file\n");
		exit(0);
	}
	// Discard everything up to '=' then stream the value. The call order
	// below must match the line order of the file.
	auto readValue = [&data, &label](auto &destination)
	{
		getline(data, label, '=');
		data >> destination;
	};
	readValue(SystemLengthConverterToKilometers);
	readValue(SystemMassConverterToKilograms);
	readValue(SystemTimeConverterToSeconds);
	readValue(NumberElementsStar1);
	readValue(NumberElementsStar2);
	readValue(CorePushBackReduction);
	readValue(PlasmaPushBackReduction);
	data.close();
	NumberElements = NumberElementsStar1 + NumberElementsStar2;
}
//--------------------------------------------------------------------------
// Reads the branch-specific settings from the BranchSetup file (starting
// positions/velocities for both stars, run length, each star's growth
// window and force increase, and the record/draw rates), converts them
// from kilometers / km-per-second / days into simulation units, validates
// that the run outlasts both growth windows, and records the rates into
// the BranchRunParameters file (which it closes).
void readAndSetBranchParameters()
{
	ifstream data;
	string label;
	data.open("BranchSetup");
	if(data.is_open() != 1)
	{
		printf("\nTSU Error could not open BranchSetup file\n");
		exit(0);
	}
	// Each line is "name = value": discard up to '=' then stream the value.
	// The call order below must match the line order of the file.
	auto readValue = [&data, &label](auto &destination)
	{
		getline(data, label, '=');
		data >> destination;
	};
	readValue(InitailPosStar1.x);
	readValue(InitailPosStar1.y);
	readValue(InitailPosStar1.z);
	readValue(InitailPosStar2.x);
	readValue(InitailPosStar2.y);
	readValue(InitailPosStar2.z);
	readValue(InitailVelStar1.x);
	readValue(InitailVelStar1.y);
	readValue(InitailVelStar1.z);
	readValue(InitailVelStar2.x);
	readValue(InitailVelStar2.y);
	readValue(InitailVelStar2.z);
	readValue(BranchRunTime);
	readValue(GrowthStartTimeStar1);
	readValue(GrowthStopTimeStar1);
	readValue(PercentForceIncreaseStar1);
	readValue(GrowthStartTimeStar2);
	readValue(GrowthStopTimeStar2);
	readValue(PercentForceIncreaseStar2);
	readValue(RecordRate);
	readValue(DrawRate);
	data.close();
	// Positions: kilometers -> system length units.
	InitailPosStar1.x /= SystemLengthConverterToKilometers;
	InitailPosStar1.y /= SystemLengthConverterToKilometers;
	InitailPosStar1.z /= SystemLengthConverterToKilometers;
	InitailPosStar2.x /= SystemLengthConverterToKilometers;
	InitailPosStar2.y /= SystemLengthConverterToKilometers;
	InitailPosStar2.z /= SystemLengthConverterToKilometers;
	// Velocities: km/s -> system length unit per system time unit.
	InitailVelStar1.x /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar1.y /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar1.z /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar2.x /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar2.y /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	InitailVelStar2.z /= (SystemLengthConverterToKilometers/SystemTimeConverterToSeconds);
	// Times: days -> system time units.
	BranchRunTime *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStartTimeStar1 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStopTimeStar1 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStartTimeStar2 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	GrowthStopTimeStar2 *= (60.0*60.0*24.0)/SystemTimeConverterToSeconds;
	// Both growth windows must close before the run ends.
	if(BranchRunTime < GrowthStopTimeStar1)
	{
		printf("\nTSU Error: BranchRunTime is less than GrowthStopTimeStar1.\n");
		exit(0);
	}
	if(BranchRunTime < GrowthStopTimeStar2)
	{
		printf("\nTSU Error: BranchRunTime is less than GrowthStopTimeStar2.\n");
		exit(0);
	}
	// Recording info into the BranchRunParameters file.
	fprintf(BranchRunParameters, "\n RecordRate = %d", RecordRate);
	fprintf(BranchRunParameters, "\n DrawRate = %d", DrawRate);
	fclose(BranchRunParameters);
}
//--------------------------------------------------------------------------
// Loads the snapshot written by the build program: the start time, then
// position, velocity and force for every element into the host arrays.
// Exits with a message if the file is missing or truncated.
void readInTheInitialsStars()
{
	FILE *startFile = fopen("../FilesFromBuild/StartPosVelForce","rb");
	if(startFile == NULL)
	{
		printf("\n\n The StartPosVelForce file does not exist\n\n");
		exit(0);
	}
	// The original ignored the fread return values, so a truncated file
	// would silently leave the arrays partly uninitialized. Check every
	// read count and bail out on a short read.
	size_t count = (size_t)NumberElements;
	if(fread(&StartTime, sizeof(float), 1, startFile) != 1
	|| fread(PosCPU, sizeof(float4), NumberElements, startFile) != count
	|| fread(VelCPU, sizeof(float4), NumberElements, startFile) != count
	|| fread(ForceCPU, sizeof(float4), NumberElements, startFile) != count)
	{
		printf("\n\n The StartPosVelForce file is incomplete or corrupt\n\n");
		exit(0);
	}
	fclose(startFile);
}
//--------------------------------------------------------------------------
// Shifts every element of each star by that star's requested starting
// position and adds the star's bulk starting velocity, then recenters the
// view on the combined center of mass. Star 1 owns elements
// [0, NumberElementsStar1), star 2 owns the rest.
void setInitialConditions()
{
	for(int i = 0; i < NumberElements; i++)
	{
		bool inStar1 = (i < NumberElementsStar1);
		const float4 &posShift = inStar1 ? InitailPosStar1 : InitailPosStar2;
		const float4 &velShift = inStar1 ? InitailVelStar1 : InitailVelStar2;
		PosCPU[i].x += posShift.x;
		PosCPU[i].y += posShift.y;
		PosCPU[i].z += posShift.z;
		VelCPU[i].x += velShift.x;
		VelCPU[i].y += velShift.y;
		VelCPU[i].z += velShift.z;
	}
	CenterOfView = getCenterOfMass();
}
//--------------------------------------------------------------------------
// Computes the total force on every element with a tiled all-pairs sweep.
// One thread per element; tiles of BLOCKSIZE elements are staged through
// shared memory while looping over every block on every GPU. Element 0 and
// element numberElementsStar1 are the two star cores; all other elements
// are plasma, and core/core, core/plasma and plasma/plasma pairs use
// different force laws (see the calculate*Force helpers). After the sweep
// each plasma element's vel.w is bumped by its star's pressureIncrease
// (the cores, ids 0 and numberElementsStar1, are deliberately excluded).
// NOTE(review): the out-of-bounds check below only prints - it does NOT
// return - so the launch configuration must exactly cover
// numberOfElements/gPUsUsed elements per GPU, or the kernel reads and
// writes past the arrays. A plain early return here would also
// desynchronize the __syncthreads() barriers, so verify the launch math in
// deviceSetup() instead of adding one.
__global__ void getForces(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int numberOfElements, float pressureIncrease1, float pressureIncrease2, float corePushBackReduction, float plasmaPushBackReduction, int gPUNumber, int gPUsUsed)
{
int id, ids, i, j, k;
float4 posMe, velMe, forceMe;
float4 partialForce;
// Accumulate in double to reduce rounding error over many pair sums.
double forceSumX, forceSumY, forceSumZ;
__shared__ float4 shPos[BLOCKSIZE];
__shared__ float4 shVel[BLOCKSIZE];
__shared__ float4 shForce[BLOCKSIZE];
// Global element index: this GPU's slice starts at blockDim.x*gridDim.x*gPUNumber.
id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
if(numberOfElements <= id)
{
printf("\n TSU error: id out of bounds in getForces. \n");
}
forceSumX = 0.0;
forceSumY = 0.0;
forceSumZ = 0.0;
// Cache this thread's element in registers for the whole sweep.
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
velMe.x = vel[id].x;
velMe.y = vel[id].y;
velMe.z = vel[id].z;
velMe.w = vel[id].w;
forceMe.x = force[id].x;
forceMe.y = force[id].y;
forceMe.z = force[id].z;
forceMe.w = force[id].w;
// Sweep every tile on every GPU's slice (k = GPU, j = block within slice).
for(k =0; k < gPUsUsed; k++)
{
for(j = 0; j < gridDim.x; j++)
{
// Stage the next tile of elements into shared memory.
shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
// Barrier: tile must be fully loaded before anyone reads it.
__syncthreads();
#pragma unroll 32
for(i = 0; i < blockDim.x; i++)
{
ids = i + blockDim.x*j + blockDim.x*gridDim.x*k;
if(id != ids)
{
// Pick the force law from which of the pair (if either) is a core.
if(id == 0 && ids == numberElementsStar1)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == numberElementsStar1 && ids == 0)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(id == 0 || id == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else if(ids == 0 || ids == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePushBackReduction);
}
else
{
partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPushBackReduction);
}
forceSumX += partialForce.x;
forceSumY += partialForce.y;
forceSumZ += partialForce.z;
}
}
// Barrier: finish reading this tile before the next one overwrites it.
__syncthreads();
}
}
force[id].x = (float)forceSumX;
force[id].y = (float)forceSumY;
force[id].z = (float)forceSumZ;
// Grow the plasma push-back term; cores (id 0 and numberElementsStar1)
// are intentionally skipped by these strict inequalities.
if(0 < id && id < numberElementsStar1)
{
vel[id].w += pressureIncrease1;
}
else if(numberElementsStar1 < id)
{
vel[id].w += pressureIncrease2;
}
}
//--------------------------------------------------------------------------
// One thread per element: kick the velocity by (F / pos.w) * dt (pos.w is
// the divisor - presumably the element's mass; confirm against the build
// program), then drift the position by the new velocity. No bounds guard:
// the launch is assumed to exactly cover this GPU's slice of the elements,
// matching the rest of this file.
__global__ void moveBodies(float4 *pos, float4 *vel, float4 *force, float dt, int gPUNumber)
{
	int element = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
	vel[element].x += (force[element].x/pos[element].w)*dt;
	vel[element].y += (force[element].y/pos[element].w)*dt;
	vel[element].z += (force[element].z/pos[element].w)*dt;
	pos[element].x += vel[element].x*dt;
	pos[element].y += vel[element].y*dt;
	pos[element].z += vel[element].z*dt;
}
//--------------------------------------------------------------------------
// Advances the N-body system from `time` until `runTime` in fixed steps of
// dt across gPUsUsed GPUs. Each step: every GPU computes forces and moves
// its slice of elements, then the slices are exchanged device-to-device so
// every GPU holds the full state for the next force pass. Draws every
// DrawRate steps and records positions/velocities every RecordRate steps.
// Returns the simulation time of the last completed step.
// NOTE(review): offSet = NumberElements/gPUsUsed assumes NumberElements is
// divisible by gPUsUsed - confirm deviceSetup() guarantees this.
float starNbody(float time, float runTime, float dt, int gPUsUsed)
{
int tDraw = 0;
int tRecord = 0;
float pressureIncrease1, pressureIncrease2;
// Number of elements owned by each GPU.
int offSet = NumberElements/gPUsUsed;
// NOTE(review): the first pass through the loop launches getForces with
// pressureIncrease = 1.0f before the growth-window logic below has run -
// confirm this initial vel.w bump is intended.
pressureIncrease1 = 1.0f;
pressureIncrease2 = 1.0f;
while(time < runTime)
{
//Getting forces on each GPU's slice.
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
getForces<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, pressureIncrease1, pressureIncrease2, CorePushBackReduction, PlasmaPushBackReduction, i, gPUsUsed);
errorCheck("getForces");
}
//Moving elements on each GPU's slice.
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
moveBodies<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], dt, i);
errorCheck("moveBodies");
}
// Wait for all kernels before exchanging slices between GPUs.
cudaDeviceSynchronize();
errorCheck("cudaDeviceSynchronize");
//Sharing memory: broadcast GPU i's updated slice to every other GPU.
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
for(int j = 0; j < gPUsUsed; j++)
{
if(i != j)
{
//printf(" pos i = %d j = %d\n", i, j);
//cudaMemcpyAsync(PosGPU[j] + (i*offSet)*sizeof(float4), PosGPU[i] + (i*offSet)*sizeof(float4), (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
cudaMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
errorCheck("cudaMemcpy Pos A");
//printf(" vel i = %d j = %d\n", i, j);
//cudaMemcpyAsync(VelGPU[j] + (i*offSet)*sizeof(float4), VelGPU[i] + (i*offSet)*sizeof(float4), (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
cudaMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
errorCheck("cudaMemcpy Vel");
}
}
}
// Wait for the async copies so every GPU has the full state.
cudaDeviceSynchronize();
errorCheck("cudaDeviceSynchronize");
//Increasing the plasma elements push back. I had to start a dt forward so I could get the blocks to sync.
if((GrowthStartTimeStar1 - dt) < time && time < (GrowthStopTimeStar1 - dt))
{
pressureIncrease1 = PercentForceIncreaseStar1;
}
else
{
pressureIncrease1 = 0.0f;
}
if((GrowthStartTimeStar2 - dt) < time && time < (GrowthStopTimeStar2 - dt))
{
pressureIncrease2 = PercentForceIncreaseStar2;
}
else
{
pressureIncrease2 = 0.0f;
}
if(tDraw == DrawRate)
{
//Because it is shared above it will only need to be copied from one GPU.
cudaSetDevice(0);
errorCheck("cudaSetDevice");
cudaMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Pos draw");
drawPicture();
tDraw = 0;
printf("\n Time in days = %f", time*SystemTimeConverterToSeconds/(60.0*60.0*24.0));
}
if(tRecord == RecordRate)
{
//Because it is shared above it will only need to be copied from one GPU.
cudaSetDevice(0);
errorCheck("cudaSetDevice");
cudaMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Pos record");
cudaMemcpy(VelCPU, VelGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Vel record");
recordPosAndVel(time);
tRecord = 0;
}
tDraw++;
tRecord++;
time += dt;
}
// The loop leaves `time` one step past the last update; report the time
// of the last completed step.
return(time - dt);
}
// Top-level run driver, registered as the GLUT idle callback. Performs one
// complete branch run end-to-end: installs a SIGINT handler, reads the run
// and branch parameters, allocates CPU/GPU memory, runs the n-body
// simulation, records the final state, cleans up, and reports the wall time.
// NOTE(review): this function never returns -- it calls exit(0) -- so GLUT
// only ever invokes it once.
void control()
{
struct sigaction sa;
float time = StartTime;
int gPUsUsed;
clock_t startTimer, endTimer;
//Starting the timer.
startTimer = clock();
// Handling input from the screen.
sa.sa_handler = signalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler
if (sigaction(SIGINT, &sa, NULL) == -1)
{
printf("\nTSU Error: sigaction error\n");
}
// Creating branch folder and copying in all the files that contributed to making the branch run.
printf("\n Creating and loading folder for the branch run.\n");
createAndLoadFolderForNewBranchRun();
// Reading in the build parameters.
printf("\n Reading and setting the run parameters.\n");
readAndSetRunParameters();
// Reading in the branch parameters.
printf("\n Reading and setting the branch parameters.\n");
readAndSetBranchParameters();
// Allocating memory for CPU and GPU.
printf("\n Allocating memory on the GPU and CPU and opening positions and velocities file.\n");
allocateCPUMemory();
// Reading in the raw stars generated by the build program.
printf("\n Reading in the stars that were generated in the build program.\n");
readInTheInitialsStars();
// Setting initial conditions.
printf("\n Setting initial conditions for the branch run.\n");
setInitialConditions();
// Draw the intial configuration.
printf("\n Drawing initial picture.\n");
drawPicture();
// Seting up the GPUs.
printf("\n Setting up the GPU.\n");
gPUsUsed = deviceSetup();
// Running the simulation.
printf("\n Running the simulation.\n");
copyStarsUpToGPU(gPUsUsed);
time = starNbody(time, BranchRunTime, DT, gPUsUsed);
// Saving the the runs final positions and velosities.
printf("\n Saving the the runs final positions and velosities.\n");
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
// Saving any wanted stats about the run that you may want. I don't have anything to record as of yet.
printf("\n Saving any wanted stats about the run that you may want.\n");
//recordStarStats();
// Freeing memory.
printf("\n Cleaning up the run.\n");
cleanUp(gPUsUsed);
fclose(PosAndVelFile);
// Stopping timer and printing out run time.
endTimer = clock();
// clock() measures CPU time; converted to whole hours/minutes/seconds below.
int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC;
int hours = seconds/3600;
int minutes = (seconds - hours*3600)/60;
seconds = seconds - hours*3600 - minutes*60;
printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds);
printf("\n The run has finished successfully \n\n");
exit(0);
}
// Program entry point: sets up the GLUT window and GL state, then hands
// control to glutMainLoop(); `control` (the idle callback) runs the whole
// simulation and exits the process itself, so glutMainLoop never returns.
int main(int argc, char** argv)
{
	int xWindowSize = 2500;
	int yWindowSize = 2500;
	glutInit(&argc,argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
	glutInitWindowSize(xWindowSize,yWindowSize);
	glutInitWindowPosition(0,0);
	glutCreateWindow("Creating Stars");
	init();
	glShadeModel(GL_SMOOTH);
	glClearColor(0.0, 0.0, 0.0, 0.0);
	glutDisplayFunc(Display);
	// Bug fix: glutReshapeFunc(reshape) was registered twice (before init()
	// and again here); a single registration of the same callback suffices.
	glutReshapeFunc(reshape);
	glutIdleFunc(control);
	glutMainLoop();
	return 0;
}
|
4291353949c24440f5935e1c858e1085341a52ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
//texture<double,1,hipReadModeElementType> tex_CDF;
//texture<float,1,hipReadModeElementType> tex_u;
__constant__ double c_CDF[1000];
//texture<>
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
/* Returns the current wall-clock time as a single microsecond count. */
long long get_time() {
	struct timeval now;
	gettimeofday(&now, NULL);
	long long micros = (now.tv_sec * 1000000) + now.tv_usec;
	return micros;
}
// Returns the number of seconds elapsed between the two specified times
// Converts the difference of two get_time() microsecond stamps to seconds.
float elapsed_time(long long start_time, long long end_time) {
	long long delta = end_time - start_time;
	return ((float) delta) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
/* Aborts the process with a diagnostic if the runtime call did not succeed. */
void check_error(hipError_t e) {
	if (e == hipSuccess)
		return;
	printf("\nCUDA error: %s\n", hipGetErrorString(e));
	exit(1);
}
// Device helper: linear scan for the first CDF entry >= value.
// Always yields a valid index -- falls back to the last slot when no
// entry qualifies.
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
	for(int x = 0; x < lengthCDF; x++)
	{
		if(CDF[x] >= value)
		{
			return x;
		}
	}
	return lengthCDF-1;
}
// Device helper: binary search for the first CDF entry >= value within
// [beginIndex, endIndex]; steps left through runs of equal entries so the
// first of the run is returned.
// NOTE(review): when the loop narrows to endIndex == beginIndex without an
// early return, the final slot is never tested and -1 is returned --
// confirm callers tolerate -1 (this helper is currently unused by kernel).
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
// Overflow-safe midpoint of the current window.
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
// Walk left to the first element of a run of equal CDF values.
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
// Resampling kernel: for each particle i, finds the first CDF entry >= u[i]
// (CDF lives in __constant__ c_CDF, copied there by the host each frame;
// c_CDF caps Nparticles at 1000) and copies that particle's state into
// xj/yj. One thread per particle; the grid is padded to a multiple of
// threads_per_block (128).
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
	int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
	int i = blockDim.x * block_id + threadIdx.x;
	__shared__ double s_u[128];
	// Bug fix: padded threads (i >= Nparticles) must not read u[] -- the
	// unconditional load was an out-of-bounds global read.
	if(i < Nparticles){
		s_u[threadIdx.x] = u[i];
	}
	// Barrier stays outside the guard: every thread in the block reaches it.
	__syncthreads();
	if(i < Nparticles){
		// Sequential search of the CDF for the first entry >= this u.
		int index = -1;
		int x;
		for(x = 0; x < Nparticles; x++){
			if(c_CDF[x] >= s_u[threadIdx.x]){
				index = x;
				break;
			}
		}
		if(index == -1){
			index = Nparticles-1;
		}
		// Resample: particle i adopts the selected particle's position.
		xj[i] = arrayX[index];
		yj[i] = arrayY[index];
	}
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
// Rounds a (non-negative) double to the nearest whole number, ties up.
double roundDouble(double value){
	int newValue = (int)(value);
	if(value - newValue < .5)
		return newValue;
	else
		// Bug fix: was `return newValue++`, whose post-increment result is
		// the OLD value -- rounding up never happened.
		return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
/**
 * Replaces every voxel equal to testValue with newValue across the whole
 * dimX x dimY x dimZ volume (row-major: x slowest, z fastest).
 */
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
	int ny = *dimY, nz = *dimZ;
	for(int x = 0; x < *dimX; x++){
		for(int y = 0; y < ny; y++){
			for(int z = 0; z < nz; z++){
				int idx = x * ny * nz + y * nz + z;
				if(array3D[idx] == testValue)
					array3D[idx] = newValue;
			}
		}
	}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
/**
 * Advances seed[index] with the LCG (file globals A, C, M) and returns a
 * uniform deviate in [0, 1); fabs guards against the signed-overflow case
 * where the updated seed is negative.
 */
double randu(int * seed, int index)
{
	int next = A*seed[index] + C;
	seed[index] = next % M;
	double ratio = seed[index]/((double) M);
	return fabs(ratio);
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
/**
 * Draws one standard-normal deviate via the Box-Muller transform,
 * consuming two uniforms from randu (seed[index] advances twice).
 */
double randn(int * seed, int index){
	double u1 = randu(seed, index);
	double u2 = randu(seed, index);
	double radius = sqrt(-2*log(u1));
	double angle = 2*PI*u2;
	return radius*cos(angle);
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
/**
 * Adds integer-quantized Gaussian noise (scale 5, seed slot 0) to every
 * voxel of the dimX x dimY x dimZ video volume.
 */
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
	for(int x = 0; x < *dimX; x++){
		for(int y = 0; y < *dimY; y++){
			for(int z = 0; z < *dimZ; z++){
				int idx = x * *dimY * *dimZ + y * *dimZ + z;
				array3D[idx] = array3D[idx] + (int)(5*randn(seed, 0));
			}
		}
	}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
/**
 * Marks the cells of a (2*radius-1)^2 grid that fall inside a disk of the
 * given radius, writing 1s only -- the caller must supply a zeroed buffer.
 */
void strelDisk(int * disk, int radius)
{
	int diameter = radius*2 - 1;
	for(int x = 0; x < diameter; x++){
		for(int y = 0; y < diameter; y++){
			double dx = (double)(x-radius+1);
			double dy = (double)(y-radius+1);
			double distance = sqrt(pow(dx,2) + pow(dy,2));
			if(distance < radius)
				disk[x*diameter + y] = 1;
		}
	}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
/**
 * Sets to 1 every cell of frame posZ whose distance from (posX, posY) is
 * strictly less than `error`, with the scan window clamped to the frame.
 */
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
	// Clamp the dilation window to [0, dim); identical to the original
	// increment/decrement loops but expressed as direct clamps.
	int startX = (posX - error < 0) ? 0 : posX - error;
	int startY = (posY - error < 0) ? 0 : posY - error;
	int endX = (posX + error > dimX) ? dimX : posX + error;
	int endY = (posY + error > dimY) ? dimY : posY + error;
	for(int x = startX; x < endX; x++){
		for(int y = startY; y < endY; y++){
			double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
			if(distance < error)
				matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
		}
	}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
/**
 * For every cell of `matrix` equal to 1, stamps a disk of radius `error`
 * into `newMatrix` at the same frame via dilate_matrix.
 */
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
	for(int z = 0; z < dimZ; z++)
		for(int x = 0; x < dimX; x++)
			for(int y = 0; y < dimY; y++)
				if(matrix[x*dimY*dimZ + y*dimZ + z] == 1)
					dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
/**
 * Collects the (dy, dx) offsets of every set cell of the structuring
 * element `se` relative to its center; writes numOnes pairs into
 * `neighbors` as [col offset, row offset].
 */
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
	int center = radius - 1;
	int diameter = radius*2 -1;
	int n = 0;
	for(int x = 0; x < diameter; x++){
		for(int y = 0; y < diameter; y++){
			if(se[x*diameter + y] == 0)
				continue;
			neighbors[n*2] = (int)(y - center);
			neighbors[n*2 + 1] = (int)(x - center);
			n++;
		}
	}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the backgrounf intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
// Generates the synthetic video: plants a moving point, dilates it into a
// disk per frame, maps background to 100 / foreground to 228, then adds
// Gaussian noise. Output overwrites I completely.
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
	int k;
	int max_size = IszX*IszY*Nfr;
	// Bug fix: I arrives straight from malloc in main(); the dilation below
	// keys off cells equal to 1, so stale heap garbage would corrupt the
	// generated sequence. Clear it before writing the track.
	memset(I, 0, sizeof(int)*IszX*IszY*Nfr);
	/*get object centers*/
	int x0 = (int)roundDouble(IszY/2.0);
	int y0 = (int)roundDouble(IszX/2.0);
	I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
	/*move point*/
	int xk, yk, pos;
	for(k = 1; k < Nfr; k++){
		xk = abs(x0 + (k-1));
		yk = abs(y0 - 2*(k-1));
		pos = yk * IszY * Nfr + xk *Nfr + k;
		if(pos >= max_size)
			pos = 0;
		I[pos] = 1;
	}
	/*dilate matrix*/
	// Bug fix: calloc instead of malloc -- imdilate_disk only writes 1s,
	// and the whole buffer (including untouched cells) is copied into I.
	int * newMatrix = (int *)calloc(IszX*IszY*Nfr, sizeof(int));
	imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
	int x, y;
	for(x = 0; x < IszX; x++){
		for(y = 0; y < IszY; y++){
			for(k = 0; k < Nfr; k++){
				I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
			}
		}
	}
	free(newMatrix);
	/*define background, add noise*/
	setIf(0, 100, I, &IszX, &IszY, &Nfr);
	setIf(1, 228, I, &IszX, &IszY, &Nfr);
	/*add noise*/
	addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
/**
 * Likelihood-ratio sum over the numOnes sampled pixels:
 * SUM( ((I[ind]-100)^2 - (I[ind]-228)^2) / 50 ).
 */
double calcLikelihoodSum(int * I, int * ind, int numOnes){
	double total = 0.0;
	for(int y = 0; y < numOnes; y++){
		double fg = (double)(I[ind[y]] - 100);
		double bg = (double)(I[ind[y]] - 228);
		total += (pow(fg,2) - pow(bg,2))/50.0;
	}
	return total;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
/**
 * Host-side sequential search: index of the first CDF entry >= value,
 * or the last index when no entry qualifies.
 */
int findIndex(double * CDF, int lengthCDF, double value){
	for(int x = 0; x < lengthCDF; x++){
		if(CDF[x] >= value){
			return x;
		}
	}
	return lengthCDF-1;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
// Runs the particle filter over the video I for Nfr frames with Nparticles
// particles: motion model and likelihoods on the CPU, CDF-based resampling
// on the GPU (the `kernel` above). Prints per-phase timings.
// Fixes vs. original: `disk` is zeroed before strelDisk (which writes 1s
// only); the dead hipBindTexture call referencing the commented-out tex_u
// is removed; hipMemcpyToSymbol is error-checked (c_CDF holds at most 1000
// entries, so this also catches Nparticles > 1000).
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
	int max_size = IszX*IszY*Nfr;
	long long start = get_time();
	//original particle centroid
	double xe = roundDouble(IszY/2.0);
	double ye = roundDouble(IszX/2.0);
	//expected object locations, compared to center
	int radius = 5;
	int diameter = radius*2 - 1;
	int * disk = (int *)malloc(diameter*diameter*sizeof(int));
	// Bug fix: strelDisk only writes 1s, so the buffer must start zeroed or
	// countOnes (and getneighbors) would read heap garbage.
	memset(disk, 0, diameter*diameter*sizeof(int));
	strelDisk(disk, radius);
	int countOnes = 0;
	int x, y;
	for(x = 0; x < diameter; x++){
		for(y = 0; y < diameter; y++){
			if(disk[x*diameter + y] == 1)
				countOnes++;
		}
	}
	double * objxy = (double *)malloc(countOnes*2*sizeof(double));
	getneighbors(disk, countOnes, objxy, radius);
	long long get_neighbors = get_time();
	printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
	//initial weights are all equal (1/Nparticles)
	double * weights = (double *)malloc(sizeof(double)*Nparticles);
	for(x = 0; x < Nparticles; x++){
		weights[x] = 1/((double)(Nparticles));
	}
	long long get_weights = get_time();
	printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
	//initial likelihood to 0.0
	double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
	double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
	double * xj = (double *)malloc(sizeof(double)*Nparticles);
	double * yj = (double *)malloc(sizeof(double)*Nparticles);
	double * CDF = (double *)malloc(sizeof(double)*Nparticles);
	//GPU copies of arrays
	double * arrayX_GPU;
	double * arrayY_GPU;
	double * xj_GPU;
	double * yj_GPU;
	double * CDF_GPU;
	int * ind = (int*)malloc(sizeof(int)*countOnes);
	double * u = (double *)malloc(sizeof(double)*Nparticles);
	double * u_GPU;
	//CUDA memory allocation
	check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
	check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
	check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
	check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
	check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
	check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
	// All particles start at the object's initial centroid.
	for(x = 0; x < Nparticles; x++){
		arrayX[x] = xe;
		arrayY[x] = ye;
	}
	int k;
	int indX, indY;
	for(k = 1; k < Nfr; k++){
		long long set_arrays = get_time();
		//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
		//apply motion model
		//draws sample from motion model (random walk). The only prior information
		//is that the object moves 2x as fast as in the y direction
		for(x = 0; x < Nparticles; x++){
			arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
			arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
		}
		//particle filter likelihood
		long long error = get_time();
		printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
		for(x = 0; x < Nparticles; x++){
			//compute the likelihood: remember our assumption is that you know
			// foreground and the background image intensity distribution.
			// Notice that we consider here a likelihood ratio, instead of
			// p(z|x). It is possible in this case. why? a hometask for you.
			//calc ind
			for(y = 0; y < countOnes; y++){
				indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
				indY = roundDouble(arrayY[x]) + objxy[y*2];
				ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
				if(ind[y] >= max_size)
					ind[y] = 0;
			}
			likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
			likelihood[x] = likelihood[x]/countOnes;
		}
		long long likelihood_time = get_time();
		printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
		// update & normalize weights
		// using equation (63) of Arulampalam Tutorial
		for(x = 0; x < Nparticles; x++){
			weights[x] = weights[x] * exp(likelihood[x]);
		}
		long long exponential = get_time();
		printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
		double sumWeights = 0;
		for(x = 0; x < Nparticles; x++){
			sumWeights += weights[x];
		}
		long long sum_time = get_time();
		printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
		for(x = 0; x < Nparticles; x++){
			weights[x] = weights[x]/sumWeights;
		}
		long long normalize = get_time();
		printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
		xe = 0;
		ye = 0;
		// estimate the object location by expected values
		for(x = 0; x < Nparticles; x++){
			xe += arrayX[x] * weights[x];
			ye += arrayY[x] * weights[x];
		}
		long long move_time = get_time();
		printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
		printf("XE: %lf\n", xe);
		printf("YE: %lf\n", ye);
		double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
		printf("%lf\n", distance);
		//display(hold off for now)
		//pause(hold off for now)
		//resampling
		CDF[0] = weights[0];
		for(x = 1; x < Nparticles; x++){
			CDF[x] = weights[x] + CDF[x-1];
		}
		long long cum_sum = get_time();
		printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
		// Stratified resampling: one evenly spaced u per particle with a
		// shared random offset u1.
		double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
		for(x = 0; x < Nparticles; x++){
			u[x] = u1 + x/((double)(Nparticles));
		}
		long long u_time = get_time();
		printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
		long long start_copy = get_time();
		//CUDA memory copying from CPU memory to GPU memory
		hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
		hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
		hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
		hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
		hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
		hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
		// The kernel reads the CDF from __constant__ memory; c_CDF is fixed
		// at 1000 doubles, so this check also rejects Nparticles > 1000.
		check_error(hipMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles));
		long long end_copy = get_time();
		//Set number of threads
		int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
		// Removed dead call: hipBindTexture(0,tex_u,...) -- tex_u is not
		// declared (commented out above) and the kernel reads u_GPU directly.
		//KERNEL FUNCTION CALL
		hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
		hipDeviceSynchronize();
		long long start_copy_back = get_time();
		//CUDA memory copying back from GPU to CPU memory
		hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
		hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
		long long end_copy_back = get_time();
		printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
		printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
		printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
		long long xyj_time = get_time();
		printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
		for(x = 0; x < Nparticles; x++){
			//reassign arrayX and arrayY
			arrayX[x] = xj[x];
			arrayY[x] = yj[x];
			weights[x] = 1/((double)(Nparticles));
		}
		long long reset = get_time();
		printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
	}
	//CUDA freeing of memory
	hipFree(u_GPU);
	hipFree(CDF_GPU);
	hipFree(yj_GPU);
	hipFree(xj_GPU);
	hipFree(arrayY_GPU);
	hipFree(arrayX_GPU);
	//free memory
	free(disk);
	free(objxy);
	free(weights);
	free(likelihood);
	free(arrayX);
	free(arrayY);
	free(xj);
	free(yj);
	free(CDF);
	free(u);
	free(ind);
}
// Entry point: parses -x/-y/-z/-np arguments, generates the synthetic video,
// then runs the particle filter and reports timings.
int main(int argc, char * argv[]){
	char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
	//check number of arguments
	if(argc != 9)
	{
		printf("%s\n", usage);
		return 0;
	}
	//check args deliminators
	if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
		printf( "%s\n",usage );
		return 0;
	}
	int IszX, IszY, Nfr, Nparticles;
	//converting a string to a integer
	// Bug fix (all four parses): sscanf returns 0 -- not EOF -- when the
	// argument is non-numeric, so the old `== EOF` test let garbage through
	// and left the variable uninitialized. Require exactly one conversion.
	if( sscanf( argv[2], "%d", &IszX ) != 1 ) {
		printf("ERROR: dimX input is incorrect");
		return 0;
	}
	if( IszX <= 0 ) {
		printf("dimX must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[4], "%d", &IszY ) != 1 ) {
		printf("ERROR: dimY input is incorrect");
		return 0;
	}
	if( IszY <= 0 ) {
		printf("dimY must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[6], "%d", &Nfr ) != 1 ) {
		printf("ERROR: Number of frames input is incorrect");
		return 0;
	}
	if( Nfr <= 0 ) {
		printf("number of frames must be > 0\n");
		return 0;
	}
	//converting a string to a integer
	if( sscanf( argv[8], "%d", &Nparticles ) != 1 ) {
		printf("ERROR: Number of particles input is incorrect");
		return 0;
	}
	if( Nparticles <= 0 ) {
		printf("Number of particles must be > 0\n");
		return 0;
	}
	//establish seed
	// NOTE(review): seed[0] is always 0 because of the *i -- confirm intended.
	int * seed = (int *)malloc(sizeof(int)*Nparticles);
	int i;
	for(i = 0; i < Nparticles; i++)
		seed[i] = time(0)*i;
	//malloc matrix
	int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
	long long start = get_time();
	//call video sequence
	videoSequence(I, IszX, IszY, Nfr, seed);
	long long endVideoSequence = get_time();
	printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
	//call particle filter
	particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
	long long endParticleFilter = get_time();
	printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
	printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
	free(seed);
	free(I);
	return 0;
}
| 4291353949c24440f5935e1c858e1085341a52ec.cu | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
//texture<double,1,cudaReadModeElementType> tex_CDF;
//texture<float,1,cudaReadModeElementType> tex_u;
__constant__ double c_CDF[1000];
//texture<>
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
// Returns the current wall-clock time as a single microsecond count.
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
// Converts the difference of two get_time() microsecond stamps to seconds.
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
// Aborts the process with a diagnostic when a CUDA runtime call fails.
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
// Device helper: linear scan for the first CDF entry >= value; falls back
// to the last index when no entry qualifies, so a valid index is always
// returned.
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
// Device helper: binary search for the first CDF entry >= value within
// [beginIndex, endIndex]; steps left through runs of equal entries so the
// first of the run is returned.
// NOTE(review): when the loop narrows to endIndex == beginIndex without an
// early return, the final slot is never tested and -1 is returned --
// confirm callers tolerate -1 (this helper is currently unused by kernel).
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
// Overflow-safe midpoint of the current window.
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
// Walk left to the first element of a run of equal CDF values.
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
// Resampling kernel: for each particle i, finds the first CDF entry >= u[i]
// (CDF lives in __constant__ c_CDF, copied there by the host each frame;
// c_CDF caps Nparticles at 1000) and copies that particle's state into
// xj/yj. One thread per particle; the grid is padded to a multiple of
// threads_per_block (128).
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
	int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
	int i = blockDim.x * block_id + threadIdx.x;
	__shared__ double s_u[128];
	// Bug fix: padded threads (i >= Nparticles) must not read u[] -- the
	// unconditional load was an out-of-bounds global read.
	if(i < Nparticles){
		s_u[threadIdx.x] = u[i];
	}
	// Barrier stays outside the guard: every thread in the block reaches it.
	__syncthreads();
	if(i < Nparticles){
		// Sequential search of the CDF for the first entry >= this u.
		int index = -1;
		int x;
		for(x = 0; x < Nparticles; x++){
			if(c_CDF[x] >= s_u[threadIdx.x]){
				index = x;
				break;
			}
		}
		if(index == -1){
			index = Nparticles-1;
		}
		// Resample: particle i adopts the selected particle's position.
		xj[i] = arrayX[index];
		yj[i] = arrayY[index];
	}
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
// Rounds a (non-negative) double to the nearest whole number, ties up.
double roundDouble(double value){
	int newValue = (int)(value);
	if(value - newValue < .5)
		return newValue;
	else
		// Bug fix: was `return newValue++`, whose post-increment result is
		// the OLD value -- rounding up never happened.
		return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
// Replaces every voxel equal to testValue with newValue across the whole
// dimX x dimY x dimZ volume (row-major: x slowest, z fastest).
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
// Advances seed[index] with the LCG (globals A, C, M) and returns a uniform
// deviate in [0, 1); fabs guards against the signed-overflow case where the
// updated seed is negative.
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
// Draws one standard-normal deviate via the Box-Muller transform,
// consuming two uniforms from randu (seed[index] advances twice).
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
// Adds integer-quantized Gaussian noise (scale 5, seed slot 0) to every
// voxel of the dimX x dimY x dimZ video volume.
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
// Marks the cells of a (2*radius-1)^2 grid that fall inside a disk of the
// given radius. Writes 1s only -- the caller must supply a zeroed buffer.
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
/**
 * Marks with 1 every voxel of frame posZ that lies strictly within
 * `error` of (posX, posY), with the scan window clamped to the frame.
 */
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
    /* clamp the scan window to [0, dim) — equivalent to the original
     * increment/decrement loops */
    int startX = posX - error;
    if (startX < 0) startX = 0;
    int startY = posY - error;
    if (startY < 0) startY = 0;
    int endX = posX + error;
    if (endX > dimX) endX = dimX;
    int endY = posY + error;
    if (endY > dimY) endY = dimY;
    int x, y;
    for (x = startX; x < endX; x++) {
        for (y = startY; y < endY; y++) {
            double dx = (double)(x - posX);
            double dy = (double)(y - posY);
            if (sqrt(dx*dx + dy*dy) < error)
                matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
        }
    }
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
/**
 * For every set voxel of the reference video, stamps a dilated disk of
 * radius `error` into the target video at the same frame.
 * @param matrix    the reference video
 * @param newMatrix the target video receiving the dilation
 */
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
    int x, y, z;
    for (z = 0; z < dimZ; z++) {
        for (x = 0; x < dimX; x++) {
            for (y = 0; y < dimY; y++) {
                if (matrix[x*dimY*dimZ + y*dimZ + z] == 1)
                    dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
            }
        }
    }
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
/**
 * Walks the structuring element row-major and records, for each set
 * cell, its (column, row) offset from the disk center.
 * @param se        the disk structuring element
 * @param numOnes   number of ones in the disk (accepted but not used;
 *                  the full element is scanned)
 * @param neighbors output: pairs (y-offset, x-offset) per set cell
 * @param radius    radius used to build the disk
 */
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
    int center = radius - 1;
    int diameter = radius*2 - 1;
    int out = 0;
    int x, y;
    for (x = 0; x < diameter; x++) {
        for (y = 0; y < diameter; y++) {
            if (se[x*diameter + y]) {
                neighbors[out*2] = (int)(y - center);
                neighbors[out*2 + 1] = (int)(x - center);
                out++;
            }
        }
    }
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the backgrounf intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
/*
 * Synthesizes the test video: a single disk-shaped object moving
 * linearly across the frames, then thresholded into background (100)
 * and foreground (228) intensities and corrupted with Gaussian noise.
 */
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
// Object drifts +1 in x and -2 in y per frame; out-of-range positions
// wrap to index 0 rather than being dropped.
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
// Grow each seeded point into a radius-5 disk, then copy the dilated
// video back into I.
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
// setIf maps voxels equal to 0 -> 100 (background) and 1 -> 228
// (foreground) — defined earlier in this file; confirm semantics there.
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
/**
 * Accumulates the likelihood-ratio terms over the indexed pixels:
 * ((I - 100)^2 - (I - 228)^2) / 50 summed over ind[0..numOnes).
 * @return the likelihood sum (0.0 when numOnes == 0)
 */
double calcLikelihoodSum(int * I, int * ind, int numOnes){
    double likelihoodSum = 0.0;
    int y;
    for (y = 0; y < numOnes; y++) {
        double bgDiff = (double)(I[ind[y]] - 100);
        double fgDiff = (double)(I[ind[y]] - 228);
        likelihoodSum += (bgDiff*bgDiff - fgDiff*fgDiff) / 50.0;
    }
    return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
/**
 * Sequential search for the first CDF entry >= value.
 * @return its index, or lengthCDF-1 when no entry qualifies
 */
int findIndex(double * CDF, int lengthCDF, double value){
    int x;
    for (x = 0; x < lengthCDF; x++) {
        if (CDF[x] >= value)
            return x;
    }
    /* value exceeds every entry: clamp to the last index */
    return lengthCDF - 1;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
/*
 * Particle filter driver: tracks the object through Nfr frames.
 * Per frame: motion model -> likelihoods -> weight update/normalize ->
 * position estimate -> CDF -> systematic resampling on the GPU
 * (the resampling `kernel` is defined elsewhere in this file).
 */
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
// Count the 1s in the structuring element to size objxy.
// NOTE(review): strelDisk only writes 1s; cells outside the disk are
// read here uninitialized — confirm/zero-fill upstream.
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
// All particles start at the frame center.
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
// Flatten each disk offset around the particle into a voxel index;
// out-of-range indices are wrapped to 0 (same as videoSequence).
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
// Position estimate = weighted mean of the particle cloud.
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
// Build the cumulative distribution of the normalized weights.
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
// Systematic resampling: one uniform draw, stratified offsets.
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
// NOTE(review): xj/yj are outputs of the kernel; their first H2D copy
// transfers uninitialized host memory (harmless but wasteful) — confirm.
cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
// CDF also goes to constant memory (c_CDF) and u is bound to a texture
// (tex_u) — both declared elsewhere in this file.
cudaMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
cudaBindTexture(0,tex_u,u_GPU,Nparticles * sizeof(double));
//KERNEL FUNCTION CALL
kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
cudaThreadSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
// Adopt the resampled particles and reset weights to uniform.
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
cudaFree(u_GPU);
cudaFree(CDF_GPU);
cudaFree(yj_GPU);
cudaFree(xj_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
/**
 * Entry point: parses "-x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>",
 * synthesizes the test video, runs the particle filter, and reports
 * wall-clock timings for each phase.
 */
int main(int argc, char * argv[]){
    char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
    //check number of arguments
    if(argc != 9)
    {
        printf("%s\n", usage);
        return 0;
    }
    //check args deliminators
    if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
        printf( "%s\n",usage );
        return 0;
    }
    int IszX, IszY, Nfr, Nparticles;
    //converting a string to a integer
    //Fix: sscanf returns the number of successful conversions (0 on a
    //match failure), not EOF, so the result must be compared against 1;
    //the old `== EOF` check let non-numeric input through with the
    //target variable left uninitialized.
    if( sscanf( argv[2], "%d", &IszX ) != 1 ) {
        printf("ERROR: dimX input is incorrect");
        return 0;
    }
    if( IszX <= 0 ) {
        printf("dimX must be > 0\n");
        return 0;
    }
    //converting a string to a integer
    if( sscanf( argv[4], "%d", &IszY ) != 1 ) {
        printf("ERROR: dimY input is incorrect");
        return 0;
    }
    if( IszY <= 0 ) {
        printf("dimY must be > 0\n");
        return 0;
    }
    //converting a string to a integer
    if( sscanf( argv[6], "%d", &Nfr ) != 1 ) {
        printf("ERROR: Number of frames input is incorrect");
        return 0;
    }
    if( Nfr <= 0 ) {
        printf("number of frames must be > 0\n");
        return 0;
    }
    //converting a string to a integer
    if( sscanf( argv[8], "%d", &Nparticles ) != 1 ) {
        printf("ERROR: Number of particles input is incorrect");
        return 0;
    }
    if( Nparticles <= 0 ) {
        printf("Number of particles must be > 0\n");
        return 0;
    }
    //establish seed (note: seed[0] is always 0 since time(0)*0 == 0)
    int * seed = (int *)malloc(sizeof(int)*Nparticles);
    int i;
    for(i = 0; i < Nparticles; i++)
        seed[i] = time(0)*i;
    //malloc matrix
    int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
    long long start = get_time();
    //call video sequence
    videoSequence(I, IszX, IszY, Nfr, seed);
    long long endVideoSequence = get_time();
    printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
    //call particle filter
    particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
    long long endParticleFilter = get_time();
    printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
    printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
    free(seed);
    free(I);
    return 0;
}
|
c7ec52c2be8257d2980d799c4312345c44486493.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <sutil/vec_math.h>
#include "sphere.h"
#define float3_as_ints( u ) float_as_int( u.x ), float_as_int( u.y ), float_as_int( u.z )
// OptiX custom-primitive intersection program for a sphere.
// Works in the ray's world space translated to the sphere center,
// with the direction normalized (l rescales roots back to ray t).
// Attributes reported: the unit normal (as 3 ints) and the radius.
extern "C" __global__ void __intersection__sphere()
{
const sphere::SphereHitGroupData* hit_group_data = reinterpret_cast<sphere::SphereHitGroupData*>( optixGetSbtDataPointer() );
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin();
const float ray_tmax = optixGetRayTmax();
// O: ray origin relative to the sphere center; D: unit direction.
const float3 O = ray_orig - hit_group_data->sphere.center;
const float l = 1.0f / length( ray_dir );
const float3 D = ray_dir * l;
const float radius = hit_group_data->sphere.radius;
// Quadratic |O + t D|^2 = r^2 with unit D: t^2 + 2bt + c = 0.
float b = dot( O, D );
float c = dot( O, O ) - radius * radius;
float disc = b * b - c;
if( disc > 0.0f )
{
float sdisc = sqrtf( disc );
float root1 = ( -b - sdisc );
float root11 = 0.0f;
bool check_second = true;
// When the hit is far from the origin (root1 large relative to the
// radius), re-solve from a point near the surface to reduce
// floating-point cancellation; root11 is the correction term.
const bool do_refine = fabsf( root1 ) > ( 10.0f * radius );
if( do_refine )
{
// refine root1
float3 O1 = O + root1 * D;
b = dot( O1, D );
c = dot( O1, O1 ) - radius * radius;
disc = b * b - c;
if( disc > 0.0f )
{
sdisc = sqrtf( disc );
root11 = ( -b - sdisc );
}
}
float t;
float3 normal;
// Near intersection: report it if inside [tmin, tmax]; if accepted
// (optixReportIntersection returns true) skip the far root.
t = ( root1 + root11 ) * l;
if( t > ray_tmin && t < ray_tmax )
{
normal = ( O + ( root1 + root11 ) * D ) / radius;
if( optixReportIntersection( t, 0, float3_as_ints( normal ), float_as_int( radius ) ) )
check_second = false;
}
if( check_second )
{
// Far intersection (b/sdisc may have been recomputed by refinement,
// hence the root1 offset when refined).
float root2 = ( -b + sdisc ) + ( do_refine ? root1 : 0 );
t = root2 * l;
normal = ( O + root2 * D ) / radius;
if( t > ray_tmin && t < ray_tmax )
optixReportIntersection( t, 0, float3_as_ints( normal ), float_as_int( radius ) );
}
}
}
| c7ec52c2be8257d2980d799c4312345c44486493.cu | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <sutil/vec_math.h>
#include "sphere.h"
#define float3_as_ints( u ) float_as_int( u.x ), float_as_int( u.y ), float_as_int( u.z )
// OptiX custom-primitive intersection program for a sphere.
// Works in the ray's world space translated to the sphere center,
// with the direction normalized (l rescales roots back to ray t).
// Attributes reported: the unit normal (as 3 ints) and the radius.
extern "C" __global__ void __intersection__sphere()
{
const sphere::SphereHitGroupData* hit_group_data = reinterpret_cast<sphere::SphereHitGroupData*>( optixGetSbtDataPointer() );
const float3 ray_orig = optixGetWorldRayOrigin();
const float3 ray_dir = optixGetWorldRayDirection();
const float ray_tmin = optixGetRayTmin();
const float ray_tmax = optixGetRayTmax();
// O: ray origin relative to the sphere center; D: unit direction.
const float3 O = ray_orig - hit_group_data->sphere.center;
const float l = 1.0f / length( ray_dir );
const float3 D = ray_dir * l;
const float radius = hit_group_data->sphere.radius;
// Quadratic |O + t D|^2 = r^2 with unit D: t^2 + 2bt + c = 0.
float b = dot( O, D );
float c = dot( O, O ) - radius * radius;
float disc = b * b - c;
if( disc > 0.0f )
{
float sdisc = sqrtf( disc );
float root1 = ( -b - sdisc );
float root11 = 0.0f;
bool check_second = true;
// When the hit is far from the origin (root1 large relative to the
// radius), re-solve from a point near the surface to reduce
// floating-point cancellation; root11 is the correction term.
const bool do_refine = fabsf( root1 ) > ( 10.0f * radius );
if( do_refine )
{
// refine root1
float3 O1 = O + root1 * D;
b = dot( O1, D );
c = dot( O1, O1 ) - radius * radius;
disc = b * b - c;
if( disc > 0.0f )
{
sdisc = sqrtf( disc );
root11 = ( -b - sdisc );
}
}
float t;
float3 normal;
// Near intersection: report it if inside [tmin, tmax]; if accepted
// (optixReportIntersection returns true) skip the far root.
t = ( root1 + root11 ) * l;
if( t > ray_tmin && t < ray_tmax )
{
normal = ( O + ( root1 + root11 ) * D ) / radius;
if( optixReportIntersection( t, 0, float3_as_ints( normal ), float_as_int( radius ) ) )
check_second = false;
}
if( check_second )
{
// Far intersection (b/sdisc may have been recomputed by refinement,
// hence the root1 offset when refined).
float root2 = ( -b + sdisc ) + ( do_refine ? root1 : 0 );
t = root2 * l;
normal = ( O + root2 * D ) / radius;
if( t > ray_tmin && t < ray_tmax )
optixReportIntersection( t, 0, float3_as_ints( normal ), float_as_int( radius ) );
}
}
}
|
fe718d220c4d55873250e05b608fba3ee2ad5476.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab3_cuda.h"
#define SQUARE_BLOCKSIZE 32
#define LINEAR_BLOCKSIZE 1024
#define JACOBI_TOLERANCE 0.001
// Device-global working state for the Jacobi eigenvalue solver:
// S = input symmetric matrix (N x N), E = eigenvector accumulator,
// e = eigenvalue estimates, c/s = current rotation cosine/sine,
// temp_maximums/temp_indices = per-block reduction scratch.
__device__ double *S, *E, *e, *c, *s, *temp_maximums;
// changed[k] = row k's eigenvalue still moving; state = count of such rows.
__device__ bool *changed;
__device__ int *ind, *state, N, *temp_indices;
// Host-side copy of the matrix dimension (set in JACOBI()).
int N_device;
// Row-major flat index: i1 * l2 + i2. The l1 (row count) argument is
// accepted for call-site symmetry but unused.
__device__ inline int INDEX(int i1, int i2, int l1, int l2) {
return i1 * l2 + i2;
}
// Debug helper: dumps an n1 x n2 row-major matrix, framed by blank lines.
// Launch with a single thread.
__global__ void printMat(double *mat, int n1, int n2) {
    printf("\n");
    for (int row = 0; row < n1; row++) {
        for (int col = 0; col < n2; col++)
            printf("%f ", mat[INDEX(row, col, n1, n2)]);
        printf("\n");
    }
    printf("\n");
}
// Debug helper: dumps a double vector on one line, framed by blank lines.
__global__ void printVec(double *vec, int n1) {
    printf("\n");
    for (int idx = 0; idx < n1; idx++)
        printf("%f ", vec[idx]);
    printf("\n");
    printf("\n");
}
// Device-callable variant of printVec for use inside kernels.
__device__ void printVecDev(double *vec, int n1) {
    printf("\n");
    for (int idx = 0; idx < n1; idx++)
        printf("%f ", vec[idx]);
    printf("\n");
    printf("\n");
}
// Debug helper: dumps a bool vector as 0/1, framed by blank lines.
__global__ void printVec(bool *vec, int n1) {
    printf("\n");
    for (int idx = 0; idx < n1; idx++)
        printf("%d ", vec[idx]);
    printf("\n");
    printf("\n");
}
// Debug helper: dumps an int vector, framed by blank lines.
__global__ void printVec(int *vec, int n1) {
    printf("\n");
    for (int idx = 0; idx < n1; idx++)
        printf("%d ", vec[idx]);
    printf("\n");
    printf("\n");
}
// TODO
// Sequential argmax of |S[k][j]| over the strictly-upper columns
// j in (k, N); writes the winning column into *result.
__device__ void MAXIND(int k, int *result) {
    int best = k + 1;
    double bestVal = fabs(S[INDEX(k, best, N, N)]);
    for (int j = k + 2; j < N; j++) {
        double v = fabs(S[INDEX(k, j, N, N)]);
        if (v > bestVal) {
            bestVal = v;
            best = j;
        }
    }
    *result = best;
}
// Adds t to the running eigenvalue estimate e[k] (clamped at 0) and
// maintains the global convergence counter `state`: row k counts as
// "changed" while its eigenvalue last moved by more than
// JACOBI_TOLERANCE.
// NOTE(review): (*state)--/++ are plain read-modify-writes, so this is
// only safe when called from a single thread (as UPDATE_COMBINED does).
__device__ void UPDATE(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) {
e[k] = 0;
}
double change = fabs(ek_prev - e[k]);
// printf("%f\n", change);
if (changed[k] && change < JACOBI_TOLERANCE) {
changed[k] = false;
(*state)--;
} else if ((!changed[k]) && change > JACOBI_TOLERANCE) {
changed[k] = true;
(*state)++;
}
}
// Applies the current plane rotation [c -s; s c] to the element pair
// (S[k][l], S[i][j]) in place.
__device__ void ROTATE(int k, int l, int i, int j) {
    double first = S[INDEX(k, l, N, N)];
    double second = S[INDEX(i, j, N, N)];
    S[INDEX(k, l, N, N)] = (*c) * first - (*s) * second;
    S[INDEX(i, j, N, N)] = (*s) * first + (*c) * second;
}
// Single-thread kernel that stashes the device pointers and dimension
// into the file-scope __device__ globals used by every other kernel.
// (The printf is leftover debug output.)
__global__ void INIT0(double *S_in, double *E_in, double *e_in, double *c_in,
double *s_in, double *temp_maximums_in, bool *changed_in,
int *ind_in, int *state_in, int N_in,
int *temp_indices_in) {
S = S_in;
E = E_in;
e = e_in;
c = c_in;
s = s_in;
temp_maximums = temp_maximums_in;
changed = changed_in;
ind = ind_in;
state = state_in;
N = N_in;
printf("%f %d %d\n", S[0], N, N_in);
temp_indices = temp_indices_in;
}
// Initializes E to the N x N identity, one thread per element
// (E[i] = 1 on the diagonal, 0 elsewhere). Thread 0's printf is
// leftover debug output.
__global__ void INIT1() {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0) {
printf("%f %d\n", S[0], N);
}
if (i < N * N) {
E[i] = ((i / N) == (i % N));
}
}
// Resets the convergence counter: all N rows start out "changed".
__global__ void INIT2() { *state = N; }
// TODO
// Per-row initialization: ind[k] = column of the largest off-diagonal
// element in row k, e[k] = diagonal entry, row marked as changed.
__global__ void INIT3() {
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k < N) {
MAXIND(k, &ind[k]);
e[k] = S[INDEX(k, k, N, N)];
changed[k] = true;
}
}
// Grid-stride + shared-memory reduction that finds the row k in
// [offset, offset+num_elements) maximizing |S[k][ind[k]]| and writes
// the winner into *m (each launched block overwrites *m with its own
// block-local winner; BEST_M_HOST's follow-up pass is meant to combine
// blocks).
// NOTE(review): the final writes look swapped — temp_maximums (double)
// receives the winning INDEX while temp_indices (int) receives the
// truncated magnitude — and the second-pass launch in BEST_M_HOST
// re-reduces rows of S rather than these partials. Verify against the
// intended two-pass reduction before relying on multi-block results.
__global__ void BEST_M_PARALLEL(int *m, int offset, int num_elements) {
int tid = threadIdx.x;
int gid = tid + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int max_indice = 0;
double max_ = fabs(S[INDEX(0, ind[0], N, N)]);
double temp;
int i, k;
for (i = gid; i < num_elements; i += stride) {
k = i + offset;
temp = fabs(S[INDEX(k, ind[k], N, N)]);
if (temp > max_) {
max_ = temp;
max_indice = k;
}
}
// Tree reduction over the block's candidates in shared memory.
__shared__ int max_ms_local[LINEAR_BLOCKSIZE];
__shared__ double maximums[LINEAR_BLOCKSIZE];
max_ms_local[tid] = max_indice;
maximums[tid] = max_;
__syncthreads();
for (int size = LINEAR_BLOCKSIZE / 2; size > 0; size /= 2) {
if (tid < size && maximums[tid] < maximums[tid + size]) {
maximums[tid] = maximums[tid + size];
max_ms_local[tid] = max_ms_local[tid + size];
}
__syncthreads();
}
if (tid == 0) {
temp_maximums[blockIdx.x] = max_ms_local[0];
temp_indices[blockIdx.x] = maximums[0];
*m = max_ms_local[0];
}
}
// Host wrapper for the pivot-row search over rows 1..N_device-2.
// NOTE(review): the second launch re-runs BEST_M_PARALLEL over rows
// [0, numblocks) of S instead of reducing the per-block partials from
// the first pass — confirm this matches the intended algorithm.
__host__ void BEST_M_HOST(int *dev_m) {
int numblocks = (N_device - 1 + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
// printf("Kernels %d %d\n", numblocks, LINEAR_BLOCKSIZE);
BEST_M_PARALLEL<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_m, 1, N_device - 2);
if (numblocks > 1) {
BEST_M_PARALLEL<<<1, LINEAR_BLOCKSIZE>>>(dev_m, 0, numblocks);
}
}
// TODO
// Single-thread reference version of the pivot search: *m = the row k
// in [0, N-1) whose tracked off-diagonal entry |S[k][ind[k]]| is
// largest.
__global__ void BEST_M(int *m) {
*m = 0;
int k;
double max_ = fabs(S[INDEX(*m, ind[*m], N, N)]), temp;
for (k = 1; k < N - 1; k++) {
temp = fabs(S[INDEX(k, ind[k], N, N)]);
if (temp > max_) {
*m = k;
max_ = temp;
}
}
}
// Single-thread kernel: selects the pivot pair (k, l) = (m, ind[m]),
// computes the Jacobi rotation cosine/sine (globals c, s) and the
// eigenvalue increment t, then annihilates the pivot S[k][l].
__global__ void GET_S_C(int *k, int *l, int *m, double *t) {
*k = *m;
*l = ind[*m];
// Standard Jacobi rotation from the pivot p and half-difference y of
// the diagonal entries; the sign of y flips s and t.
double p = S[INDEX(*k, *l, N, N)];
double y = (e[*l] - e[*k]) / 2;
double d = fabs(y) + sqrt(p * p + y * y);
double r = sqrt(p * p + d * d);
*c = d / r;
*s = p / r;
*t = p * p / d;
if (y < 0) {
*s = -(*s);
*t = -(*t);
}
S[INDEX(*k, *l, N, N)] = 0.0;
}
// Single-thread kernel: applies the eigenvalue increment -t to row k
// and +t to row l, updating the convergence counter via UPDATE().
__global__ void UPDATE_COMBINED(int *k, int *l, double *t) {
UPDATE(*k, -1 * (*t));
UPDATE(*l, *t);
}
// Rotates the element pairs above the pivot: (i, k)/(i, l) for i in [0, k).
__global__ void ROTATE_MULTIPLE1(int *k, int *l) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= *k)
        return;
    ROTATE(i, *k, i, *l);
}
// Rotates the element pairs between the pivot rows: (k, i)/(i, l)
// for i in (k, l).
__global__ void ROTATE_MULTIPLE2(int *k, int *l) {
    int i = blockIdx.x * blockDim.x + threadIdx.x + (*k) + 1;
    if (i >= *l)
        return;
    ROTATE(*k, i, i, *l);
}
// Rotates the element pairs past the pivot: (k, i)/(l, i) for i in (l, N).
__global__ void ROTATE_MULTIPLE3(int *k, int *l) {
    int i = blockIdx.x * blockDim.x + threadIdx.x + (*l) + 1;
    if (i >= N)
        return;
    ROTATE(*k, i, *l, i);
}
// Applies the current rotation to columns k and l of the eigenvector
// matrix E, one thread per row.
__global__ void UPDATE_E(int *k, int *l) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N)
        return;
    double colK = E[INDEX(row, *k, N, N)];
    double colL = E[INDEX(row, *l, N, N)];
    E[INDEX(row, *k, N, N)] = (*c) * colK - (*s) * colL;
    E[INDEX(row, *l, N, N)] = (*s) * colK + (*c) * colL;
}
// Parallel version of MAXIND: finds the argmax of |S[k][j]| over the
// strictly-upper columns of row k (j in [k+2, N), with j=k+1 as the
// running default) via a grid-stride scan and a shared-memory tree
// reduction, writing the winner into ind[k]. When problem_size != -1
// the scan instead covers columns [0, problem_size) (second-pass mode
// used by UPDATE_IND_PARALLEL).
// NOTE(review): as in BEST_M_PARALLEL, the temp_maximums/temp_indices
// writes appear type-swapped (index into the double array, truncated
// magnitude into the int array), and the second pass re-scans S rather
// than the per-block partials — verify before trusting multi-block runs.
__global__ void MAXIND_PARALLEL(int *k_pointer, int problem_size) {
// int m = k + 1, i;
// double max_ = fabs(S[INDEX(k, m, N, N)]), temp;
// for (i = k + 2; i < N; i++) {
// temp = fabs(S[INDEX(k, i, N, N)]);
// if (temp > max_) {
// m = i;
// max_ = temp;
// }
// }
// *result = m;
int k = *k_pointer;
int num_elements = N - k - 2;
int offset = k + 2;
if (problem_size != -1) {
num_elements = problem_size;
offset = 0;
}
int tid = threadIdx.x;
int gid = tid + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int m = k + 1;
double max_ = fabs(S[INDEX(k, m, N, N)]), temp;
int i, i_off;
for (i = gid; i < num_elements; i += stride) {
i_off = i + offset;
temp = fabs(S[INDEX(k, i_off, N, N)]);
if (temp > max_) {
m = i_off;
max_ = temp;
}
}
// Block-level tree reduction of (index, magnitude) candidates.
__shared__ int m_shared[LINEAR_BLOCKSIZE];
__shared__ double max_shared[LINEAR_BLOCKSIZE];
m_shared[tid] = m;
max_shared[tid] = max_;
__syncthreads();
for (int size = LINEAR_BLOCKSIZE / 2; size > 0; size /= 2) {
if (tid < size && max_shared[tid] < max_shared[tid + size]) {
max_shared[tid] = max_shared[tid + size];
m_shared[tid] = m_shared[tid + size];
}
__syncthreads();
}
if (tid == 0) {
temp_maximums[blockIdx.x] = m_shared[0];
temp_indices[blockIdx.x] = max_shared[0];
ind[k] = m_shared[0];
}
}
// Host wrapper: refreshes ind[k] and ind[l] after a rotation by
// launching MAXIND_PARALLEL for each row (first pass over the row,
// optional second pass when more than one block was needed).
__host__ void UPDATE_IND_PARALLEL(int *dev_k, int *dev_l) {
int numblocks = (N_device + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
// printf("Kernels %d %d\n", numblocks, LINEAR_BLOCKSIZE);
MAXIND_PARALLEL<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, -1);
if (numblocks > 1) {
MAXIND_PARALLEL<<<1, LINEAR_BLOCKSIZE>>>(dev_k, numblocks);
}
MAXIND_PARALLEL<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_l, -1);
if (numblocks > 1) {
MAXIND_PARALLEL<<<1, LINEAR_BLOCKSIZE>>>(dev_l, numblocks);
}
}
// Single-thread fallback: sequentially refreshes ind[k] and ind[l].
__global__ void UPDATE_IND(int *k, int *l) {
MAXIND(*k, &ind[*k]);
MAXIND(*l, &ind[*l]);
}
/*
 * Host driver for the Jacobi eigenvalue iteration on an n x n symmetric
 * matrix dev_S (device memory). On return dev_e holds the eigenvalue
 * estimates and dev_E the accumulated eigenvectors. Iterates
 * pivot-selection + rotation until the device-side convergence counter
 * (`state`, read back every `checkpoint` iterations) drops to zero.
 */
void JACOBI(int n, double *dev_E, double *dev_e, double *dev_S) {
N_device = n;
int *dev_m, *dev_k, *dev_l;
double *dev_t_;
int state_local = n;
int numblocks = (n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
// Scalars exchanged between kernels live in device memory.
cudaMalloc(&dev_m, sizeof(int));
cudaMalloc(&dev_k, sizeof(int));
cudaMalloc(&dev_l, sizeof(int));
cudaMalloc(&dev_t_, sizeof(double));
int *tmp_state;
int *tmp_ind;
bool *tmp_changed;
double *tmp_c, *tmp_s, *tmp_temp_maximums;
int *tmp_temp_indices;
cudaMalloc(&tmp_state, sizeof(int));
cudaMalloc(&tmp_ind, sizeof(int) * n);
cudaMalloc(&tmp_changed, sizeof(bool) * n);
cudaMalloc(&tmp_c, sizeof(double));
cudaMalloc(&tmp_s, sizeof(double));
cudaMalloc(&tmp_temp_maximums, sizeof(double) * numblocks);
cudaMalloc(&tmp_temp_indices, sizeof(int) * numblocks);
// INIT0 publishes all pointers into the __device__ globals (replacing
// the commented-out cudaMemcpyToSymbol approach below).
INIT0<<<1, 1>>>(dev_S, dev_E, dev_e, tmp_c, tmp_s, tmp_temp_maximums,
tmp_changed, tmp_ind, tmp_state, n, tmp_temp_indices);
// cudaMemcpyToSymbol("state", &tmp_state, sizeof(void *));
// cudaMemcpyToSymbol("ind", &tmp_ind, sizeof(void *));
// cudaMemcpyToSymbol("changed", &tmp_changed, sizeof(void *));
// cudaMemcpyToSymbol("c", &tmp_c, sizeof(void *));
// cudaMemcpyToSymbol("s", &tmp_s, sizeof(void *));
// cudaMemcpyToSymbol("temp_maximums", &tmp_temp_maximums, sizeof(void *));
// cudaMemcpyToSymbol("temp_indices", &tmp_temp_indices, sizeof(void *));
// cudaMemcpyToSymbol("S", &dev_S, sizeof(void *));
// cudaMemcpyToSymbol("E", &dev_E, sizeof(void *));
// cudaMemcpyToSymbol("e", &dev_e, sizeof(void *));
// cudaMemcpyToSymbol("N", &n, sizeof(int));
numblocks = (n * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
INIT1<<<numblocks, LINEAR_BLOCKSIZE>>>();
INIT2<<<1, 1>>>();
numblocks = (n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
INIT3<<<numblocks, LINEAR_BLOCKSIZE>>>();
int count = 0;
// Read convergence state back only every ~1% of n^2 iterations to
// avoid a device sync per iteration.
int checkpoint = max(1, (n * n) / 100);
// printf("%d",checkpoint);
while (state_local > 0) {
// break;
count++;
// BEST_M<<<1, 1>>>(dev_m, n, dev_S, dev_ind);
BEST_M_HOST(dev_m);
GET_S_C<<<1, 1>>>(dev_k, dev_l, dev_m, dev_t_);
UPDATE_COMBINED<<<1, 1>>>(dev_k, dev_l, dev_t_);
ROTATE_MULTIPLE1<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
ROTATE_MULTIPLE2<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
ROTATE_MULTIPLE3<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
UPDATE_E<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
// UPDATE_IND<<<1, 1>>>(dev_k, dev_l, dev_ind, n, dev_S);
UPDATE_IND_PARALLEL(dev_k, dev_l);
if (count % checkpoint == 0) {
// printf("hey\n");
cudaMemcpy(&state_local, tmp_state, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("Checkpoint= %d\tState= %d\tIterNumber= %d\n", count / checkpoint,
state_local, count);
}
}
// printf("%d %d\n", state, count);
cudaFree(tmp_state);
cudaFree(tmp_ind);
cudaFree(tmp_changed);
cudaFree(dev_m);
cudaFree(dev_k);
cudaFree(dev_l);
cudaFree(tmp_c);
cudaFree(tmp_s);
cudaFree(dev_t_);
cudaFree(tmp_temp_maximums);
cudaFree(tmp_temp_indices);
}
// In-place odd-even transposition sort of arr[0..n) in DESCENDING
// order, permuting `indices` alongside so the caller can recover the
// original positions.
// NOTE(review): *converged is a plain global flag shared by every
// thread and __syncthreads() sits inside a data-dependent while loop —
// this is only well-defined when launched with a single block (and all
// of the block's threads iterating the while loop together). Verify
// the launch configuration at the call site.
__global__ void ODD_EVEN_SORT(double *arr, int *indices, int n,
bool *converged) {
int index_global = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
*converged = false;
bool odd_iter = false;
double temp;
int to_see, to_see_next, index_local, i, temp_int;
// Start with the identity permutation.
for (i = index_global; i < n; i += stride) {
indices[i] = i;
}
while (!(*converged)) {
__syncthreads();
*converged = true;
// Each pass compares disjoint (even,odd) or (odd,even) pairs and
// swaps when out of descending order.
for (index_local = index_global; index_local < n / 2;
index_local += stride) {
if (odd_iter && 2 * index_local + 2 < n) {
to_see = 2 * index_local + 1;
to_see_next = 2 * index_local + 2;
if (arr[to_see] < arr[to_see_next]) {
temp = arr[to_see_next];
arr[to_see_next] = arr[to_see];
arr[to_see] = temp;
temp_int = indices[to_see_next];
indices[to_see_next] = indices[to_see];
indices[to_see] = temp_int;
*converged = false;
}
} else if (!odd_iter && 2 * index_local + 1 < n) {
to_see = 2 * index_local;
to_see_next = 2 * index_local + 1;
if (arr[to_see] < arr[to_see_next]) {
temp = arr[to_see_next];
arr[to_see_next] = arr[to_see];
arr[to_see] = temp;
temp_int = indices[to_see_next];
indices[to_see_next] = indices[to_see];
indices[to_see] = temp_int;
*converged = false;
}
}
}
odd_iter = !odd_iter;
}
}
// Writes M_T (n x m) = transpose of M (m x n); one thread per output element.
__global__ void TRANSPOSE(double *M, int m, int n, double *M_T) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n * m) {
    return;  // tail guard: the grid may be larger than the matrix
  }
  // Output element idx corresponds to M's entry (idx % m, idx / m).
  M_T[idx] = M[INDEX(idx % m, idx / m, m, n)];
}
// Dense row-major matrix multiply: C (p x r) = A (p x q) * B (q x r).
// One thread per element of C; expects a 2D launch covering (r, p).
__global__ void MATMUL2(int p, int q, int r, double *A, double *B, double *C) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i;
double sum = 0;
// Guard against the grid overshooting the output extents.
if (row < p && col < r) {
for (i = 0; i < q; i++) {
sum += A[INDEX(row, i, p, q)] * B[INDEX(i, col, q, r)];
}
C[INDEX(row, col, p, r)] = sum;
}
}
// Column permutation: new_E[:, j] = old_E[:, indices[j]] for an n1 x n2
// matrix (used to reorder eigenvectors after sorting the eigenvalues).
__global__ void ARRANGE(int *indices, double *old_E, double *new_E, int n1,
int n2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n1 * n2) {
new_E[i] = old_E[INDEX(i / n2, indices[i % n2], n1, n2)];
}
}
// Singular values from eigenvalues: SIGMA[i] = sqrt(e[i]) plus its inverse.
// NOTE(review): e[i] == 0 yields a division by zero in SIGMA_INV — confirm
// the eigenvalues are strictly positive for the intended inputs.
__global__ void GET_SINGULAR_VALS(int n, double *e, double *SIGMA,
double *SIGMA_INV) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double sqrt_;
if (i < n) {
sqrt_ = sqrt(e[i]);
SIGMA[i] = sqrt_;
SIGMA_INV[i] = 1 / sqrt_;
}
}
// TODO
// Serial sum of all n eigenvalues into *eigen_total (launched <<<1, 1>>>).
__global__ void GET_EIGEN_SUM(double *eigen_total, double *e, int n) {
int i;
*eigen_total = 0;
for (i = 0; i < n; i++) {
*eigen_total += e[i];
}
}
// Builds U (m x m): columns [0, n) get (M * V) scaled by SIGMA_INV[col];
// the remaining columns [n, m) are zero-filled. One thread per element.
__global__ void MULTIPLY_SIGMA_INV(int m, int n, double *M, double *V,
double *SIGMA_INV, double *U) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i;
double sum = 0;
if (row < m && col < m) {
if (col < n) {
for (i = 0; i < n; i++) {
sum += M[INDEX(row, i, m, n)] * V[INDEX(i, col, n, n)];
}
U[INDEX(row, col, m, m)] = sum * SIGMA_INV[col];
} else {
// No singular value exists for these columns; pad with zeros.
U[INDEX(row, col, m, m)] = 0;
}
}
}
// Host wrapper: launches MULTIPLY_SIGMA_INV on an m x m 2D grid to build U.
void GET_U(int m, int n, double *dev_M, double *dev_V, double *dev_SIGMA_INV,
double *dev_U) {
dim3 dimBlock(SQUARE_BLOCKSIZE, SQUARE_BLOCKSIZE);
dim3 dimGrid((m + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE,
(m + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE);
hipLaunchKernelGGL(( MULTIPLY_SIGMA_INV), dim3(dimGrid), dim3(dimBlock), 0, 0, m, n, dev_M, dev_V, dev_SIGMA_INV,
dev_U);
}
// Serial scan (launched <<<1, 1>>>): *k = smallest count of leading
// eigenvalues whose cumulative share of *eigen_total reaches `retention`
// percent. Assumes e[] is sorted in descending order.
__global__ void GET_RETENTION(int *k, int n, double *e, double *eigen_total,
double retention) {
int k_retended = 0;
double retention_done = 0;
int i;
for (i = 0; i < n; i++) {
retention_done += 100 * e[i] / *eigen_total;
k_retended++;
if (retention_done >= retention) {
break;
}
}
*k = k_retended;
}
// Copies the first k_retended columns of E (n x n) into W (n x k_retended).
__global__ void GET_W(int k_retended, int n, double *W, double *E) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n * k_retended) {
W[i] = E[INDEX(i / k_retended, i % k_retended, n, n)];
}
}
// Debug kernel (launch <<<1, 1>>>): reconstructs U * SIGMA * V_T into new_M
// and prints the maximum absolute deviation from the original M.
__global__ void testDev(double *U, double *V_T, double *SIGMA, double *M, int m,
int n, double *UV, double *new_M) {
// UV = first n columns of U scaled column-wise by SIGMA.
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
UV[INDEX(i, j, m, n)] = U[INDEX(i, j, m, m)] * SIGMA[j];
}
}
double max_error = 0;
// new_M = UV * V_T; track the largest reconstruction error.
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
double sum = 0;
for (int k = 0; k < n; k++) {
sum += UV[INDEX(i, k, m, n)] * V_T[INDEX(k, j, n, n)];
}
new_M[INDEX(i, j, m, n)] = sum;
// printf("%f\n",M[INDEX(i, j, m, n)] - sum);
if (fabs(M[INDEX(i, j, m, n)] - sum) > max_error) {
max_error = fabs(M[INDEX(i, j, m, n)] - sum);
}
}
}
printf("Max error = %f", max_error);
}
// Host helper: allocates scratch buffers and runs the testDev reconstruction
// check. NOTE(review): the buffers are freed right after the async launch —
// this relies on hipFree synchronizing with the pending kernel; confirm.
void test(double *U, double *SIGMA, double *V_T, double *M, int m, int n) {
double *temp, *new_M;
hipMalloc(&temp, sizeof(double) * m * n);
hipMalloc(&new_M, sizeof(double) * m * n);
hipLaunchKernelGGL(( testDev), dim3(1), dim3(1), 0, 0, U, V_T, SIGMA, M, m, n, temp, new_M);
//hipLaunchKernelGGL(( printMat), dim3(1), dim3(1), 0, 0, new_M, m, n);
//hipLaunchKernelGGL(( printMat), dim3(1), dim3(1), 0, 0, M, m, n);
hipFree(temp);
hipFree(new_M);
}
// Computes the SVD of D (m x n, row-major) via a Jacobi eigensolver on
// S = D^T * D, then performs PCA: D_HAT (m x K) = D * W, where W keeps the
// top-K eigenvectors retaining `retention` percent of the total variance.
// U (m x m), SIGMA (n values), V_T (n x n) and D_HAT are malloc'd here and
// owned by the caller; *SIGMAm/*SIGMAn report SIGMA's logical dimensions.
void SVD_and_PCA(int m, int n, double *D, double **U, double **SIGMA,
                 double **V_T, int *SIGMAm, int *SIGMAn, double **D_HAT, int *K,
                 int retention) {
  double *dev_M, *dev_M_T, *dev_S, *dev_e, *dev_E, *dev_new_E, *dev_eigen_total,
      *dev_SIGMA, *dev_SIGMA_INV, *dev_V_T, *dev_U, *dev_W, *dev_D_HAT;
  int *dev_k, *dev_indices,
      numblocks = (m * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  // Upload D and form its transpose.
  hipMalloc(&dev_M, sizeof(double) * m * n);
  hipMemcpy(dev_M, D, sizeof(double) * m * n, hipMemcpyHostToDevice);
  hipMalloc(&dev_M_T, sizeof(double) * m * n);
  hipLaunchKernelGGL(TRANSPOSE, dim3(numblocks), dim3(LINEAR_BLOCKSIZE), 0, 0,
                     dev_M, m, n, dev_M_T);
  // S = M^T * M (n x n): its eigen-decomposition yields V and SIGMA^2.
  hipMalloc(&dev_S, sizeof(double) * n * n);
  dim3 dimBlock(SQUARE_BLOCKSIZE, SQUARE_BLOCKSIZE);
  dim3 dimGrid((n + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE,
               (n + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE);
  hipLaunchKernelGGL(MATMUL2, dimGrid, dimBlock, 0, 0, n, m, n, dev_M_T, dev_M,
                     dev_S);
  hipFree(dev_M_T);
  // Jacobi eigensolver: eigenvalues in dev_e, eigenvectors (columns) in dev_E.
  hipMalloc(&dev_e, sizeof(double) * n);
  hipMalloc(&dev_E, sizeof(double) * n * n);
  JACOBI(n, dev_E, dev_e, dev_S);
  hipFree(dev_S);
  // Sort eigenvalues descending and permute E's columns accordingly.
  hipMalloc(&dev_indices, sizeof(int) * n);
  hipMalloc(&dev_new_E, sizeof(double) * n * n);
  bool *converged;
  hipMalloc(&converged, sizeof(bool));
  // ODD_EVEN_SORT relies on __syncthreads(), so it must run in one block.
  hipLaunchKernelGGL(ODD_EVEN_SORT, dim3(1), dim3(LINEAR_BLOCKSIZE), 0, 0,
                     dev_e, dev_indices, n, converged);
  hipFree(converged);
  numblocks = (n * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  hipLaunchKernelGGL(ARRANGE, dim3(numblocks), dim3(LINEAR_BLOCKSIZE), 0, 0,
                     dev_indices, dev_E, dev_new_E, n, n);
  hipFree(dev_indices);
  hipFree(dev_E);
  dev_E = dev_new_E;
  // Singular values and their inverses from the sorted eigenvalues.
  hipMalloc(&dev_SIGMA, sizeof(double) * n);
  hipMalloc(&dev_SIGMA_INV, sizeof(double) * n);
  numblocks = (n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  hipLaunchKernelGGL(GET_SINGULAR_VALS, dim3(numblocks), dim3(LINEAR_BLOCKSIZE),
                     0, 0, n, dev_e, dev_SIGMA, dev_SIGMA_INV);
  // Fix: eigen_total is a double; it was previously allocated with
  // sizeof(int), so GET_EIGEN_SUM wrote 8 bytes into a 4-byte buffer.
  hipMalloc(&dev_eigen_total, sizeof(double));
  hipLaunchKernelGGL(GET_EIGEN_SUM, dim3(1), dim3(1), 0, 0, dev_eigen_total,
                     dev_e, n);
  // V_T = E^T and U = M * V * SIGMA^-1.
  hipMalloc(&dev_V_T, sizeof(double) * n * n);
  numblocks = (n * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  hipLaunchKernelGGL(TRANSPOSE, dim3(numblocks), dim3(LINEAR_BLOCKSIZE), 0, 0,
                     dev_E, n, n, dev_V_T);
  hipMalloc(&dev_U, sizeof(double) * m * m);
  GET_U(m, n, dev_M, dev_E, dev_SIGMA_INV, dev_U);
  hipFree(dev_SIGMA_INV);
  // K = number of leading components reaching the retention threshold.
  hipMalloc(&dev_k, sizeof(int));
  hipLaunchKernelGGL(GET_RETENTION, dim3(1), dim3(1), 0, 0, dev_k, n, dev_e,
                     dev_eigen_total, retention);
  hipFree(dev_eigen_total);
  hipFree(dev_e);
  hipMemcpy(K, dev_k, sizeof(int), hipMemcpyDeviceToHost);
  hipFree(dev_k);
  // W = first K eigenvectors; D_HAT = M * W (the PCA projection).
  hipMalloc(&dev_W, sizeof(double) * n * (*K));
  hipMalloc(&dev_D_HAT, sizeof(double) * m * (*K));
  numblocks = (n * (*K) + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  hipLaunchKernelGGL(GET_W, dim3(numblocks), dim3(LINEAR_BLOCKSIZE), 0, 0, *K,
                     n, dev_W, dev_E);
  hipFree(dev_E);
  dimGrid = dim3((*K + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE,
                 (m + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE);
  hipLaunchKernelGGL(MATMUL2, dimGrid, dimBlock, 0, 0, m, n, *K, dev_M, dev_W,
                     dev_D_HAT);
  hipFree(dev_W);
  hipFree(dev_M);
  // Copy results back to freshly malloc'd host buffers (caller frees them).
  *U = (double *)malloc(sizeof(double) * m * m);
  hipMemcpy(*U, dev_U, sizeof(double) * m * m, hipMemcpyDeviceToHost);
  hipFree(dev_U);
  *SIGMA = (double *)malloc(sizeof(double) * n);
  hipMemcpy(*SIGMA, dev_SIGMA, sizeof(double) * n, hipMemcpyDeviceToHost);
  hipFree(dev_SIGMA);
  *V_T = (double *)malloc(sizeof(double) * n * n);
  hipMemcpy(*V_T, dev_V_T, sizeof(double) * n * n, hipMemcpyDeviceToHost);
  hipFree(dev_V_T);
  *D_HAT = (double *)malloc(sizeof(double) * m * (*K));
  hipMemcpy(*D_HAT, dev_D_HAT, sizeof(double) * m * (*K),
            hipMemcpyDeviceToHost);
  // Removed the leftover debug printMat of dev_D_HAT.
  hipFree(dev_D_HAT);
  hipDeviceSynchronize();
  *SIGMAm = m;
  *SIGMAn = n;
}
| fe718d220c4d55873250e05b608fba3ee2ad5476.cu | #include "lab3_cuda.h"
#define SQUARE_BLOCKSIZE 32
#define LINEAR_BLOCKSIZE 1024
#define JACOBI_TOLERANCE 0.001
__device__ double *S, *E, *e, *c, *s, *temp_maximums;
__device__ bool *changed;
__device__ int *ind, *state, N, *temp_indices;
int N_device;
// Row-major linear index of (i1, i2) in an l1 x l2 matrix (l1 is unused).
__device__ inline int INDEX(int i1, int i2, int l1, int l2) {
return i1 * l2 + i2;
}
// Debug kernel (launch <<<1, 1>>>): prints an n1 x n2 row-major matrix.
__global__ void printMat(double *mat, int n1, int n2) {
printf("\n");
for (int i = 0; i < n1; i++) {
for (int j = 0; j < n2; j++) {
printf("%f ", mat[INDEX(i, j, n1, n2)]);
}
printf("\n");
}
printf("\n");
}
// Debug kernel (launch <<<1, 1>>>): prints a double vector of length n1.
__global__ void printVec(double *vec, int n1) {
printf("\n");
for (int i = 0; i < n1; i++) {
printf("%f ", vec[i]);
}
printf("\n");
printf("\n");
}
// Device-callable variant of printVec for use inside other kernels.
__device__ void printVecDev(double *vec, int n1) {
printf("\n");
for (int i = 0; i < n1; i++) {
printf("%f ", vec[i]);
}
printf("\n");
printf("\n");
}
// Debug kernel overload: prints a bool vector as 0/1 values.
__global__ void printVec(bool *vec, int n1) {
printf("\n");
for (int i = 0; i < n1; i++) {
printf("%d ", vec[i]);
}
printf("\n");
printf("\n");
}
// Debug kernel overload: prints an int vector of length n1.
__global__ void printVec(int *vec, int n1) {
printf("\n");
for (int i = 0; i < n1; i++) {
printf("%d ", vec[i]);
}
printf("\n");
printf("\n");
}
// TODO
// Writes into *result the column index m > k with the largest |S[k][m]|
// (scans only the part of row k to the right of the diagonal).
__device__ void MAXIND(int k, int *result) {
int m = k + 1, i;
double max_ = fabs(S[INDEX(k, m, N, N)]), temp;
for (i = k + 2; i < N; i++) {
temp = fabs(S[INDEX(k, i, N, N)]);
if (temp > max_) {
m = i;
max_ = temp;
}
}
*result = m;
}
// Accumulates the Jacobi eigenvalue update t onto e[k] (clamped at 0) and
// maintains convergence bookkeeping: changed[k] records whether e[k] moved
// by more than JACOBI_TOLERANCE, and *state counts still-changing
// eigenvalues (the host loop stops when it reaches 0).  The non-atomic
// *state update is safe because the caller (UPDATE_COMBINED) runs <<<1, 1>>>.
__device__ void UPDATE(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) {
e[k] = 0;
}
double change = fabs(ek_prev - e[k]);
// printf("%f\n", change);
if (changed[k] && change < JACOBI_TOLERANCE) {
changed[k] = false;
(*state)--;
} else if ((!changed[k]) && change > JACOBI_TOLERANCE) {
changed[k] = true;
(*state)++;
}
}
// Applies the current Givens rotation (*c, *s) to the entry pair
// S[k][l], S[i][j].
__device__ void ROTATE(int k, int l, int i, int j) {
double Skl = S[INDEX(k, l, N, N)], Sij = S[INDEX(i, j, N, N)];
S[INDEX(k, l, N, N)] = (*c) * Skl - (*s) * Sij;
S[INDEX(i, j, N, N)] = (*s) * Skl + (*c) * Sij;
}
// Stores the device-global pointers and the problem size used by all the
// Jacobi kernels.  Must run (as <<<1, 1>>>) before any other Jacobi kernel.
__global__ void INIT0(double *S_in, double *E_in, double *e_in, double *c_in,
                      double *s_in, double *temp_maximums_in, bool *changed_in,
                      int *ind_in, int *state_in, int N_in,
                      int *temp_indices_in) {
  S = S_in;
  E = E_in;
  e = e_in;
  c = c_in;
  s = s_in;
  temp_maximums = temp_maximums_in;
  changed = changed_in;
  ind = ind_in;
  state = state_in;
  N = N_in;
  temp_indices = temp_indices_in;
  // Removed leftover debug printf of S[0]/N.
}
// Initializes E to the N x N identity matrix, one thread per element.
__global__ void INIT1() {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < N * N) {
    // 1.0 on the diagonal (row == col), 0.0 elsewhere.
    E[i] = ((i / N) == (i % N));
  }
  // Removed leftover debug printf from thread 0.
}
// Convergence counter starts at N: every eigenvalue is initially "changing".
__global__ void INIT2() { *state = N; }
// TODO
// Per-row Jacobi setup: ind[k] = argmax |S[k][i]| right of the diagonal,
// e[k] = S[k][k], and every row starts marked as still changing.
__global__ void INIT3() {
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k < N) {
MAXIND(k, &ind[k]);
e[k] = S[INDEX(k, k, N, N)];
changed[k] = true;
}
}
// Block-parallel argmax of |S[k][ind[k]]| over rows k in
// [offset, offset + num_elements); requires blockDim.x == LINEAR_BLOCKSIZE.
// Each block records its winner in temp_maximums/temp_indices[blockIdx.x];
// thread 0 of each block also writes *m (with several blocks the final *m is
// whichever block wrote last, which the host resolves with a second
// single-block pass).
__global__ void BEST_M_PARALLEL(int *m, int offset, int num_elements) {
  int tid = threadIdx.x;
  int gid = tid + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  // Row 0 seeds the reduction (the strided sweep below starts at `offset`).
  int max_indice = 0;
  double max_ = fabs(S[INDEX(0, ind[0], N, N)]);
  double temp;
  int i, k;
  for (i = gid; i < num_elements; i += stride) {
    k = i + offset;
    temp = fabs(S[INDEX(k, ind[k], N, N)]);
    if (temp > max_) {
      max_ = temp;
      max_indice = k;
    }
  }
  // Tree reduction over the per-thread winners in shared memory.
  __shared__ int max_ms_local[LINEAR_BLOCKSIZE];
  __shared__ double maximums[LINEAR_BLOCKSIZE];
  max_ms_local[tid] = max_indice;
  maximums[tid] = max_;
  __syncthreads();
  for (int size = LINEAR_BLOCKSIZE / 2; size > 0; size /= 2) {
    if (tid < size && maximums[tid] < maximums[tid + size]) {
      maximums[tid] = maximums[tid + size];
      max_ms_local[tid] = max_ms_local[tid + size];
    }
    __syncthreads();
  }
  if (tid == 0) {
    // Fix: the two stores were swapped — the double maximum went into the
    // int `temp_indices` array and the int index into `temp_maximums`.
    temp_maximums[blockIdx.x] = maximums[0];
    temp_indices[blockIdx.x] = max_ms_local[0];
    *m = max_ms_local[0];
    // NOTE(review): the host's second pass re-scans rows of S instead of
    // reducing temp_maximums/temp_indices — confirm that is intended.
  }
}
// Host-side two-pass argmax of |S[k][ind[k]]| over pivot rows.  The first
// pass scans rows [1, N_device - 1) (row 0 seeds the reduction inside the
// kernel); when more than one block ran, a single-block second pass follows.
// NOTE(review): the second pass re-scans rows [0, numblocks) of S rather
// than reducing the per-block temp_maximums/temp_indices results — confirm
// this is the intended reduction.
__host__ void BEST_M_HOST(int *dev_m) {
int numblocks = (N_device - 1 + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
// printf("Kernels %d %d\n", numblocks, LINEAR_BLOCKSIZE);
BEST_M_PARALLEL<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_m, 1, N_device - 2);
if (numblocks > 1) {
BEST_M_PARALLEL<<<1, LINEAR_BLOCKSIZE>>>(dev_m, 0, numblocks);
}
}
// TODO
// Serial reference version (launch <<<1, 1>>>): *m = argmax over rows
// k in [0, N-1) of |S[k][ind[k]]|; kept as a fallback/debug path.
__global__ void BEST_M(int *m) {
*m = 0;
int k;
double max_ = fabs(S[INDEX(*m, ind[*m], N, N)]), temp;
for (k = 1; k < N - 1; k++) {
temp = fabs(S[INDEX(k, ind[k], N, N)]);
if (temp > max_) {
*m = k;
max_ = temp;
}
}
}
// Single-thread kernel: selects the pivot (k, l) = (*m, ind[*m]), computes
// the Jacobi rotation parameters (globals *c = cos, *s = sin) and *t, the
// eigenvalue shift, then annihilates the pivot entry S[k][l].
__global__ void GET_S_C(int *k, int *l, int *m, double *t) {
*k = *m;
*l = ind[*m];
double p = S[INDEX(*k, *l, N, N)];
double y = (e[*l] - e[*k]) / 2;
double d = fabs(y) + sqrt(p * p + y * y);
double r = sqrt(p * p + d * d);
*c = d / r;
*s = p / r;
*t = p * p / d;
// Sign flip keeps the rotation direction consistent when y < 0.
if (y < 0) {
*s = -(*s);
*t = -(*t);
}
S[INDEX(*k, *l, N, N)] = 0.0;
}
// Single-thread kernel: applies the -t / +t eigenvalue updates for the
// pivot pair (k, l) after a rotation.
__global__ void UPDATE_COMBINED(int *k, int *l, double *t) {
UPDATE(*k, -1 * (*t));
UPDATE(*l, *t);
}
// Rotates the entry pairs (i, k) / (i, l) for all rows i < k.
__global__ void ROTATE_MULTIPLE1(int *k, int *l) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < *k) {
ROTATE(i, *k, i, *l);
}
}
// Rotates the entry pairs (k, i) / (i, l) for indices k < i < l.
__global__ void ROTATE_MULTIPLE2(int *k, int *l) {
int i = blockIdx.x * blockDim.x + threadIdx.x + (*k) + 1;
if (i < *l) {
ROTATE(*k, i, i, *l);
}
}
// Rotates the entry pairs (k, i) / (l, i) for all columns i > l.
__global__ void ROTATE_MULTIPLE3(int *k, int *l) {
int i = blockIdx.x * blockDim.x + threadIdx.x + (*l) + 1;
if (i < N) {
ROTATE(*k, i, *l, i);
}
}
// Applies the current rotation to the eigenvector matrix: mixes columns
// k and l of E, one thread per row i.
__global__ void UPDATE_E(int *k, int *l) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double Eik, Eil;
if (i < N) {
Eik = E[INDEX(i, *k, N, N)];
Eil = E[INDEX(i, *l, N, N)];
E[INDEX(i, *k, N, N)] = (*c) * Eik - (*s) * Eil;
E[INDEX(i, *l, N, N)] = (*s) * Eik + (*c) * Eil;
}
}
// Parallel version of MAXIND: recomputes ind[k] = argmax over columns of
// |S[k][i]|; requires blockDim.x == LINEAR_BLOCKSIZE.  With
// problem_size == -1 it scans columns [k+2, N) (column k+1 seeds the
// reduction); otherwise it scans columns [0, problem_size) — the host's
// single-block second pass over per-block results.  Each block records its
// winner in temp_maximums/temp_indices[blockIdx.x]; thread 0 writes ind[k]
// (last writer wins with >1 block, resolved by the host's second pass).
__global__ void MAXIND_PARALLEL(int *k_pointer, int problem_size) {
  int k = *k_pointer;
  int num_elements = N - k - 2;
  int offset = k + 2;
  if (problem_size != -1) {
    num_elements = problem_size;
    offset = 0;
  }
  int tid = threadIdx.x;
  int gid = tid + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  int m = k + 1;
  double max_ = fabs(S[INDEX(k, m, N, N)]), temp;
  int i, i_off;
  for (i = gid; i < num_elements; i += stride) {
    i_off = i + offset;
    temp = fabs(S[INDEX(k, i_off, N, N)]);
    if (temp > max_) {
      m = i_off;
      max_ = temp;
    }
  }
  // Tree reduction over the per-thread winners in shared memory.
  __shared__ int m_shared[LINEAR_BLOCKSIZE];
  __shared__ double max_shared[LINEAR_BLOCKSIZE];
  m_shared[tid] = m;
  max_shared[tid] = max_;
  __syncthreads();
  for (int size = LINEAR_BLOCKSIZE / 2; size > 0; size /= 2) {
    if (tid < size && max_shared[tid] < max_shared[tid + size]) {
      max_shared[tid] = max_shared[tid + size];
      m_shared[tid] = m_shared[tid + size];
    }
    __syncthreads();
  }
  if (tid == 0) {
    // Fix: the two stores were swapped — the double maximum went into the
    // int `temp_indices` array and the int index into `temp_maximums`.
    temp_maximums[blockIdx.x] = max_shared[0];
    temp_indices[blockIdx.x] = m_shared[0];
    ind[k] = m_shared[0];
  }
}
// Host wrapper: recomputes ind[*k] and ind[*l] using the two-pass parallel
// argmax (the single-block second pass runs only when several blocks ran).
// NOTE(review): the second pass re-scans columns [0, numblocks) of S rather
// than reducing the per-block results — confirm this is intended.
__host__ void UPDATE_IND_PARALLEL(int *dev_k, int *dev_l) {
int numblocks = (N_device + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
// printf("Kernels %d %d\n", numblocks, LINEAR_BLOCKSIZE);
MAXIND_PARALLEL<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, -1);
if (numblocks > 1) {
MAXIND_PARALLEL<<<1, LINEAR_BLOCKSIZE>>>(dev_k, numblocks);
}
MAXIND_PARALLEL<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_l, -1);
if (numblocks > 1) {
MAXIND_PARALLEL<<<1, LINEAR_BLOCKSIZE>>>(dev_l, numblocks);
}
}
// Serial fallback (launch <<<1, 1>>>): recompute ind for the two pivot rows.
__global__ void UPDATE_IND(int *k, int *l) {
MAXIND(*k, &ind[*k]);
MAXIND(*l, &ind[*l]);
}
// Host driver for the Jacobi eigenvalue iteration on the n x n matrix dev_S.
// On return dev_e holds the eigenvalues (unsorted) and dev_E the
// eigenvectors (as columns).  dev_S is modified in place.
void JACOBI(int n, double *dev_E, double *dev_e, double *dev_S) {
N_device = n;
int *dev_m, *dev_k, *dev_l;
double *dev_t_;
int state_local = n;
int numblocks = (n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
// Scalars exchanged between the per-iteration kernels.
cudaMalloc(&dev_m, sizeof(int));
cudaMalloc(&dev_k, sizeof(int));
cudaMalloc(&dev_l, sizeof(int));
cudaMalloc(&dev_t_, sizeof(double));
int *tmp_state;
int *tmp_ind;
bool *tmp_changed;
double *tmp_c, *tmp_s, *tmp_temp_maximums;
int *tmp_temp_indices;
// Buffers backing the device-global pointers installed by INIT0.
cudaMalloc(&tmp_state, sizeof(int));
cudaMalloc(&tmp_ind, sizeof(int) * n);
cudaMalloc(&tmp_changed, sizeof(bool) * n);
cudaMalloc(&tmp_c, sizeof(double));
cudaMalloc(&tmp_s, sizeof(double));
cudaMalloc(&tmp_temp_maximums, sizeof(double) * numblocks);
cudaMalloc(&tmp_temp_indices, sizeof(int) * numblocks);
INIT0<<<1, 1>>>(dev_S, dev_E, dev_e, tmp_c, tmp_s, tmp_temp_maximums,
tmp_changed, tmp_ind, tmp_state, n, tmp_temp_indices);
// cudaMemcpyToSymbol("state", &tmp_state, sizeof(void *));
// cudaMemcpyToSymbol("ind", &tmp_ind, sizeof(void *));
// cudaMemcpyToSymbol("changed", &tmp_changed, sizeof(void *));
// cudaMemcpyToSymbol("c", &tmp_c, sizeof(void *));
// cudaMemcpyToSymbol("s", &tmp_s, sizeof(void *));
// cudaMemcpyToSymbol("temp_maximums", &tmp_temp_maximums, sizeof(void *));
// cudaMemcpyToSymbol("temp_indices", &tmp_temp_indices, sizeof(void *));
// cudaMemcpyToSymbol("S", &dev_S, sizeof(void *));
// cudaMemcpyToSymbol("E", &dev_E, sizeof(void *));
// cudaMemcpyToSymbol("e", &dev_e, sizeof(void *));
// cudaMemcpyToSymbol("N", &n, sizeof(int));
// E = identity, state counter = n, per-row pivot candidates.
numblocks = (n * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
INIT1<<<numblocks, LINEAR_BLOCKSIZE>>>();
INIT2<<<1, 1>>>();
numblocks = (n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
INIT3<<<numblocks, LINEAR_BLOCKSIZE>>>();
int count = 0;
// The convergence counter is copied back only every `checkpoint`
// iterations to avoid a device->host transfer per iteration.
int checkpoint = max(1, (n * n) / 100);
// printf("%d",checkpoint);
while (state_local > 0) {
// break;
count++;
// BEST_M<<<1, 1>>>(dev_m, n, dev_S, dev_ind);
// One Jacobi step: pick pivot, build rotation, apply it to S and E,
// then refresh the pivot candidates for the two touched rows.
BEST_M_HOST(dev_m);
GET_S_C<<<1, 1>>>(dev_k, dev_l, dev_m, dev_t_);
UPDATE_COMBINED<<<1, 1>>>(dev_k, dev_l, dev_t_);
ROTATE_MULTIPLE1<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
ROTATE_MULTIPLE2<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
ROTATE_MULTIPLE3<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
UPDATE_E<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_k, dev_l);
// UPDATE_IND<<<1, 1>>>(dev_k, dev_l, dev_ind, n, dev_S);
UPDATE_IND_PARALLEL(dev_k, dev_l);
if (count % checkpoint == 0) {
// printf("hey\n");
cudaMemcpy(&state_local, tmp_state, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("Checkpoint= %d\tState= %d\tIterNumber= %d\n", count / checkpoint,
state_local, count);
}
}
// printf("%d %d\n", state, count);
cudaFree(tmp_state);
cudaFree(tmp_ind);
cudaFree(tmp_changed);
cudaFree(dev_m);
cudaFree(dev_k);
cudaFree(dev_l);
cudaFree(tmp_c);
cudaFree(tmp_s);
cudaFree(dev_t_);
cudaFree(tmp_temp_maximums);
cudaFree(tmp_temp_indices);
}
// Parallel odd-even transposition sort of arr (descending) that also fills
// `indices` with the sorting permutation (indices[j] = original position of
// the element now at j).  Uses __syncthreads(), so it is only correct when
// launched with a single block (which is how the caller launches it).
__global__ void ODD_EVEN_SORT(double *arr, int *indices, int n,
                              bool *converged) {
  int index_global = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  *converged = false;
  bool odd_iter = false;
  // Start from the identity permutation.
  for (int i = index_global; i < n; i += stride) {
    indices[i] = i;
  }
  while (!(*converged)) {
    __syncthreads();
    // Fix: only one thread resets the flag.  Previously every thread wrote
    // `true`, racing with threads that had already recorded a swap (false).
    if (index_global == 0) {
      *converged = true;
    }
    __syncthreads();
    for (int index_local = index_global; index_local < n / 2;
         index_local += stride) {
      int to_see, to_see_next;
      if (odd_iter && 2 * index_local + 2 < n) {
        to_see = 2 * index_local + 1;
        to_see_next = 2 * index_local + 2;
      } else if (!odd_iter && 2 * index_local + 1 < n) {
        to_see = 2 * index_local;
        to_see_next = 2 * index_local + 1;
      } else {
        continue;
      }
      // Swap the adjacent pair into descending order, keeping the
      // permutation array in sync.
      if (arr[to_see] < arr[to_see_next]) {
        double temp = arr[to_see_next];
        arr[to_see_next] = arr[to_see];
        arr[to_see] = temp;
        int temp_int = indices[to_see_next];
        indices[to_see_next] = indices[to_see];
        indices[to_see] = temp_int;
        *converged = false;
      }
    }
    // Fix: barrier before re-reading *converged so no thread evaluates the
    // loop condition while another is still writing the flag.  Without it,
    // one thread could exit while the rest wait at __syncthreads() above —
    // a deadlock.
    __syncthreads();
    odd_iter = !odd_iter;
  }
}
// Writes M_T (n x m) = transpose of M (m x n); one thread per output element.
__global__ void TRANSPOSE(double *M, int m, int n, double *M_T) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n * m) {
    return;  // tail guard: the grid may be larger than the matrix
  }
  // Output element idx corresponds to M's entry (idx % m, idx / m).
  M_T[idx] = M[INDEX(idx % m, idx / m, m, n)];
}
// Dense row-major matrix multiply: C (p x r) = A (p x q) * B (q x r).
// One thread per element of C; expects a 2D launch covering (r, p).
__global__ void MATMUL2(int p, int q, int r, double *A, double *B, double *C) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i;
double sum = 0;
// Guard against the grid overshooting the output extents.
if (row < p && col < r) {
for (i = 0; i < q; i++) {
sum += A[INDEX(row, i, p, q)] * B[INDEX(i, col, q, r)];
}
C[INDEX(row, col, p, r)] = sum;
}
}
// Column permutation: new_E[:, j] = old_E[:, indices[j]] for an n1 x n2
// matrix (used to reorder eigenvectors after sorting the eigenvalues).
__global__ void ARRANGE(int *indices, double *old_E, double *new_E, int n1,
                        int n2) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n1 * n2) {
    return;  // tail guard
  }
  int row = idx / n2;
  int col = idx % n2;
  new_E[idx] = old_E[INDEX(row, indices[col], n1, n2)];
}
// Singular values from eigenvalues: SIGMA[i] = sqrt(e[i]) plus its inverse.
// NOTE(review): e[i] == 0 yields a division by zero in SIGMA_INV — confirm
// the eigenvalues are strictly positive for the intended inputs.
__global__ void GET_SINGULAR_VALS(int n, double *e, double *SIGMA,
double *SIGMA_INV) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double sqrt_;
if (i < n) {
sqrt_ = sqrt(e[i]);
SIGMA[i] = sqrt_;
SIGMA_INV[i] = 1 / sqrt_;
}
}
// TODO
// Serial sum of all n eigenvalues into *eigen_total (launched <<<1, 1>>>).
__global__ void GET_EIGEN_SUM(double *eigen_total, double *e, int n) {
int i;
*eigen_total = 0;
for (i = 0; i < n; i++) {
*eigen_total += e[i];
}
}
// Builds U (m x m): columns [0, n) get (M * V) scaled by SIGMA_INV[col];
// the remaining columns [n, m) are zero-filled. One thread per element.
__global__ void MULTIPLY_SIGMA_INV(int m, int n, double *M, double *V,
double *SIGMA_INV, double *U) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i;
double sum = 0;
if (row < m && col < m) {
if (col < n) {
for (i = 0; i < n; i++) {
sum += M[INDEX(row, i, m, n)] * V[INDEX(i, col, n, n)];
}
U[INDEX(row, col, m, m)] = sum * SIGMA_INV[col];
} else {
// No singular value exists for these columns; pad with zeros.
U[INDEX(row, col, m, m)] = 0;
}
}
}
// Host wrapper: launches MULTIPLY_SIGMA_INV on an m x m 2D grid to build U.
void GET_U(int m, int n, double *dev_M, double *dev_V, double *dev_SIGMA_INV,
double *dev_U) {
dim3 dimBlock(SQUARE_BLOCKSIZE, SQUARE_BLOCKSIZE);
dim3 dimGrid((m + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE,
(m + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE);
MULTIPLY_SIGMA_INV<<<dimGrid, dimBlock>>>(m, n, dev_M, dev_V, dev_SIGMA_INV,
dev_U);
}
// Serial scan (launched <<<1, 1>>>): *k = smallest count of leading
// eigenvalues whose cumulative share of *eigen_total reaches `retention`
// percent. Assumes e[] is sorted in descending order.
__global__ void GET_RETENTION(int *k, int n, double *e, double *eigen_total,
double retention) {
int k_retended = 0;
double retention_done = 0;
int i;
for (i = 0; i < n; i++) {
retention_done += 100 * e[i] / *eigen_total;
k_retended++;
if (retention_done >= retention) {
break;
}
}
*k = k_retended;
}
// Copies the first k_retended columns of E (n x n) into W (n x k_retended).
__global__ void GET_W(int k_retended, int n, double *W, double *E) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n * k_retended) {
    return;  // tail guard
  }
  W[idx] = E[INDEX(idx / k_retended, idx % k_retended, n, n)];
}
// Debug kernel (launch <<<1, 1>>>): reconstructs U * SIGMA * V_T into new_M
// and prints the maximum absolute deviation from the original M.
__global__ void testDev(double *U, double *V_T, double *SIGMA, double *M, int m,
int n, double *UV, double *new_M) {
// UV = first n columns of U scaled column-wise by SIGMA.
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
UV[INDEX(i, j, m, n)] = U[INDEX(i, j, m, m)] * SIGMA[j];
}
}
double max_error = 0;
// new_M = UV * V_T; track the largest reconstruction error.
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
double sum = 0;
for (int k = 0; k < n; k++) {
sum += UV[INDEX(i, k, m, n)] * V_T[INDEX(k, j, n, n)];
}
new_M[INDEX(i, j, m, n)] = sum;
// printf("%f\n",M[INDEX(i, j, m, n)] - sum);
if (fabs(M[INDEX(i, j, m, n)] - sum) > max_error) {
max_error = fabs(M[INDEX(i, j, m, n)] - sum);
}
}
}
printf("Max error = %f", max_error);
}
// Host helper: allocates scratch buffers and runs the testDev reconstruction
// check. NOTE(review): the buffers are freed right after the async launch —
// this relies on cudaFree synchronizing with the pending kernel; confirm.
void test(double *U, double *SIGMA, double *V_T, double *M, int m, int n) {
double *temp, *new_M;
cudaMalloc(&temp, sizeof(double) * m * n);
cudaMalloc(&new_M, sizeof(double) * m * n);
testDev<<<1, 1>>>(U, V_T, SIGMA, M, m, n, temp, new_M);
// printMat<<<1, 1>>>(new_M, m, n);
// printMat<<<1, 1>>>(M, m, n);
cudaFree(temp);
cudaFree(new_M);
}
// Computes the SVD of D (m x n, row-major) via a Jacobi eigensolver on
// S = D^T * D, then performs PCA: D_HAT (m x K) = D * W, where W keeps the
// top-K eigenvectors retaining `retention` percent of the total variance.
// U (m x m), SIGMA (n values), V_T (n x n) and D_HAT are malloc'd here and
// owned by the caller; *SIGMAm/*SIGMAn report SIGMA's logical dimensions.
void SVD_and_PCA(int m, int n, double *D, double **U, double **SIGMA,
                 double **V_T, int *SIGMAm, int *SIGMAn, double **D_HAT, int *K,
                 int retention) {
  double *dev_M, *dev_M_T, *dev_S, *dev_e, *dev_E, *dev_new_E, *dev_eigen_total,
      *dev_SIGMA, *dev_SIGMA_INV, *dev_V_T, *dev_U, *dev_W, *dev_D_HAT;
  int *dev_k, *dev_indices,
      numblocks = (m * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  // Upload D and form its transpose.
  cudaMalloc(&dev_M, sizeof(double) * m * n);
  cudaMemcpy(dev_M, D, sizeof(double) * m * n, cudaMemcpyHostToDevice);
  cudaMalloc(&dev_M_T, sizeof(double) * m * n);
  TRANSPOSE<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_M, m, n, dev_M_T);
  // S = M^T * M (n x n): its eigen-decomposition yields V and SIGMA^2.
  cudaMalloc(&dev_S, sizeof(double) * n * n);
  dim3 dimBlock(SQUARE_BLOCKSIZE, SQUARE_BLOCKSIZE);
  dim3 dimGrid((n + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE,
               (n + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE);
  MATMUL2<<<dimGrid, dimBlock>>>(n, m, n, dev_M_T, dev_M, dev_S);
  cudaFree(dev_M_T);
  // Jacobi eigensolver: eigenvalues in dev_e, eigenvectors (columns) in dev_E.
  cudaMalloc(&dev_e, sizeof(double) * n);
  cudaMalloc(&dev_E, sizeof(double) * n * n);
  JACOBI(n, dev_E, dev_e, dev_S);
  cudaFree(dev_S);
  // Sort eigenvalues descending and permute E's columns accordingly.
  cudaMalloc(&dev_indices, sizeof(int) * n);
  cudaMalloc(&dev_new_E, sizeof(double) * n * n);
  bool *converged;
  cudaMalloc(&converged, sizeof(bool));
  // ODD_EVEN_SORT relies on __syncthreads(), so it must run in one block.
  ODD_EVEN_SORT<<<1, LINEAR_BLOCKSIZE>>>(dev_e, dev_indices, n, converged);
  cudaFree(converged);
  numblocks = (n * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  ARRANGE<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_indices, dev_E, dev_new_E, n, n);
  cudaFree(dev_indices);
  cudaFree(dev_E);
  dev_E = dev_new_E;
  // Singular values and their inverses from the sorted eigenvalues.
  cudaMalloc(&dev_SIGMA, sizeof(double) * n);
  cudaMalloc(&dev_SIGMA_INV, sizeof(double) * n);
  numblocks = (n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  GET_SINGULAR_VALS<<<numblocks, LINEAR_BLOCKSIZE>>>(n, dev_e, dev_SIGMA,
                                                     dev_SIGMA_INV);
  // Fix: eigen_total is a double; it was previously allocated with
  // sizeof(int), so GET_EIGEN_SUM wrote 8 bytes into a 4-byte buffer.
  cudaMalloc(&dev_eigen_total, sizeof(double));
  GET_EIGEN_SUM<<<1, 1>>>(dev_eigen_total, dev_e, n);
  // V_T = E^T and U = M * V * SIGMA^-1.
  cudaMalloc(&dev_V_T, sizeof(double) * n * n);
  numblocks = (n * n + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  TRANSPOSE<<<numblocks, LINEAR_BLOCKSIZE>>>(dev_E, n, n, dev_V_T);
  cudaMalloc(&dev_U, sizeof(double) * m * m);
  GET_U(m, n, dev_M, dev_E, dev_SIGMA_INV, dev_U);
  cudaFree(dev_SIGMA_INV);
  // K = number of leading components reaching the retention threshold.
  cudaMalloc(&dev_k, sizeof(int));
  GET_RETENTION<<<1, 1>>>(dev_k, n, dev_e, dev_eigen_total, retention);
  cudaFree(dev_eigen_total);
  cudaFree(dev_e);
  cudaMemcpy(K, dev_k, sizeof(int), cudaMemcpyDeviceToHost);
  cudaFree(dev_k);
  // W = first K eigenvectors; D_HAT = M * W (the PCA projection).
  cudaMalloc(&dev_W, sizeof(double) * n * (*K));
  cudaMalloc(&dev_D_HAT, sizeof(double) * m * (*K));
  numblocks = (n * (*K) + LINEAR_BLOCKSIZE - 1) / LINEAR_BLOCKSIZE;
  GET_W<<<numblocks, LINEAR_BLOCKSIZE>>>(*K, n, dev_W, dev_E);
  cudaFree(dev_E);
  dimGrid = dim3((*K + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE,
                 (m + SQUARE_BLOCKSIZE - 1) / SQUARE_BLOCKSIZE);
  MATMUL2<<<dimGrid, dimBlock>>>(m, n, *K, dev_M, dev_W, dev_D_HAT);
  cudaFree(dev_W);
  cudaFree(dev_M);
  // Copy results back to freshly malloc'd host buffers (caller frees them).
  *U = (double *)malloc(sizeof(double) * m * m);
  cudaMemcpy(*U, dev_U, sizeof(double) * m * m, cudaMemcpyDeviceToHost);
  cudaFree(dev_U);
  *SIGMA = (double *)malloc(sizeof(double) * n);
  cudaMemcpy(*SIGMA, dev_SIGMA, sizeof(double) * n, cudaMemcpyDeviceToHost);
  cudaFree(dev_SIGMA);
  *V_T = (double *)malloc(sizeof(double) * n * n);
  cudaMemcpy(*V_T, dev_V_T, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
  cudaFree(dev_V_T);
  *D_HAT = (double *)malloc(sizeof(double) * m * (*K));
  cudaMemcpy(*D_HAT, dev_D_HAT, sizeof(double) * m * (*K),
             cudaMemcpyDeviceToHost);
  // Removed the leftover debug printMat of dev_D_HAT.
  cudaFree(dev_D_HAT);
  cudaDeviceSynchronize();
  *SIGMAm = m;
  *SIGMAn = n;
}
|
1da6d65dd1ca465e3be01c1e4f1345000677451a.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2017 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* This sample does a very simple vector add, and will trigger illegal memory
* access error. The purpose of this sample is to test the error handling of
* the device plugin or other components.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/**
* Computes the vector addition and intentionally triggers memory error
*/
// Vector add that deliberately indexes 10,000,000 elements past the buffers
// to provoke an illegal-memory-access error.  numElements is intentionally
// unused: there is no bounds check by design.
__global__ void
vectorAddAndTriggerError(const float *A, const float *B, float *C, int numElements)
{
// Intentionally triggering out of bounds
int i = (blockDim.x * blockIdx.x) + threadIdx.x + 10000000;
C[i] = A[i] + B[i];
}
// Sample driver: sets up two random vectors, launches the intentionally
// faulting kernel, and expects the copy-back to report the resulting error.
// Setup return codes are deliberately left unchecked; host/device buffers
// are never freed because the program exits immediately (sample code).
int main(void)
{
printf("Starting illegal memory access sample\n");
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
int vecLength = 50000;
size_t size = vecLength * sizeof(float);
// Initializing two vectors on host
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
for (int i = 0; i < vecLength; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocating three vectors on device
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
// copy data from host to device
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Run the vectorAdd func and trigger error
int threadsPerBlock = 256;
int blocksPerGrid =(vecLength + threadsPerBlock - 1) / threadsPerBlock;
printf("Run vectorAdd with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAddAndTriggerError), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, vecLength);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy results from the device to the host\n");
float *h_C = (float *)malloc(size);
// The synchronizing copy is where the kernel's illegal access surfaces.
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Expecting error here
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
| 1da6d65dd1ca465e3be01c1e4f1345000677451a.cu | /* Copyright 2017 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* This sample does a very simple vector add, and will trigger illegal memory
* access error. The purpose of this sample is to test the error handling of
* the device plugin or other components.
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
/**
* Computes the vector addition and intentionally triggers memory error
*/
__global__ void
vectorAddAndTriggerError(const float *A, const float *B, float *C, int numElements)
{
// Intentionally triggering out of bounds
int i = (blockDim.x * blockIdx.x) + threadIdx.x + 10000000;
C[i] = A[i] + B[i];
}
int main(void)
{
printf("Starting illegal memory access sample\n");
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
int vecLength = 50000;
size_t size = vecLength * sizeof(float);
// Initializing two vectors on host
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
for (int i = 0; i < vecLength; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocating three vectors on device
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
// copy data from host to device
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Run the vectorAdd func and trigger error
int threadsPerBlock = 256;
int blocksPerGrid =(vecLength + threadsPerBlock - 1) / threadsPerBlock;
printf("Run vectorAdd with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAddAndTriggerError<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, vecLength);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy results from the device to the host\n");
float *h_C = (float *)malloc(size);
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Expecting error here
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
55ea9bdd4245445e5883bc66d0502bd5d6c06f3b.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define DEBUG 0
#define PROFILE 1
#if PROFILE
//Events for timing analysis
hipEvent_t beginLoop;
hipEvent_t endLoop;
hipEvent_t beginEvent;
hipEvent_t endEvent;
//event time records
float randomPosKernelTime;
float searchAlgoTime;
#endif
#if DEBUG
#define NUMBOIDS 10
int printcnt = 0;
int maxprints = 4;
#endif
/**
 * Check for CUDA errors; print (with an optional source line) and exit the
 * process if the last runtime call left an error behind.
 */
void checkCUDAError(const char *msg, int line = -1) {
  hipError_t status = hipGetLastError();
  if (status == hipSuccess) {
    return;  // nothing to report
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
#define maxVel 1.0f
#define minVel -1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_orderedPos;
glm::vec3 *dev_orderedVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer mixing hash (Wang-style avalanche steps) used to decorrelate the
// per-boid RNG seeds in generateRandomVec3.
__host__ __device__ unsigned int hash(unsigned int a) {
  a += 0x7ed55d16 + (a << 12);   // add-shift mix
  a ^= 0xc761c23c ^ (a >> 19);   // xor-shift mix
  a += 0x165667b1 + (a << 5);
  a = (a + 0xd3a2646c) ^ (a << 9);
  a += 0xfd7046c5 + (a << 3);
  a ^= 0xb55a4f09 ^ (a >> 16);
  return a;
}
/**
 * LOOK-1.2 - helper for a CUDA kernel.
 * Produce a pseudo-random vec3 with each component drawn from [-1, 1],
 * seeded deterministically from (time, index) via the hash above.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine engine(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> span(-1, 1);
  // Keep the three draws inside one constructor call, exactly as before.
  return glm::vec3((float)span(engine), (float)span(engine), (float)span(engine));
}
/**
 * LOOK-1.2 - basic CUDA kernel.
 * Fill arr[0..N) with random positions, each component scaled by `scale`.
 * One thread per boid; excess threads in the last block exit early.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (tid >= N) {
    return;
  }
  glm::vec3 rnd = generateRandomVec3(time, tid);
  arr[tid].x = scale * rnd.x;
  arr[tid].y = scale * rnd.y;
  arr[tid].z = scale * rnd.z;
}
/**
 * Initialize memory, update some globals.
 * Allocates the position/velocity ping-pong buffers and the uniform-grid
 * bookkeeping arrays, seeds boid positions with a random kernel, and derives
 * the grid dimensions from the flocking rule distances.
 */
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
#if PROFILE
// Timing events reused to profile individual kernels below.
hipEventCreate(&beginEvent);
hipEventCreate(&endEvent);
#endif
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
#if PROFILE
hipEventRecord(beginEvent, 0);
#endif
// LOOK-1.2 - This is a typical CUDA kernel invocation.
// Seed every boid with a random position scaled to the scene.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
#if PROFILE
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&randomPosKernelTime, beginEvent, endEvent);
std::cout << "pos init Time: " << randomPosKernelTime << std::endl;
#endif
// LOOK-2.1 computing grid params
// Cell width is twice the largest rule radius, so a boid's neighborhood
// spans at most two cells per axis (at most 8 cells total).
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
// Shift the grid minimum so the grid is centered on the origin.
// (gridMinimum is a zero-initialized global before this runs.)
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
// Parallel arrays used for the cell sort (2.1)...
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
// ...and cell-ordered copies of pos/vel for the coherent path (2.3).
hipMalloc((void**)&dev_orderedPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_orderedPos failed!");
hipMalloc((void**)&dev_orderedVel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_orderedVel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
 * Copy the boid positions into the VBO so that they can be drawn by OpenGL.
 * Each boid writes one float4 (xyz scaled by -1/s_scale, w = 1).
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int idx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (idx >= N) {
    return;
  }
  float c_scale = -1.0f / s_scale;
  float *out = vbo + 4 * idx;  // this boid's float4 slot
  out[0] = pos[idx].x * c_scale;
  out[1] = pos[idx].y * c_scale;
  out[2] = pos[idx].z * c_scale;
  out[3] = 1.0f;
}
// Copy boid velocities into the VBO as float4s, offset by +0.3 per
// component (used as a color), with w = 1.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int idx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (idx >= N) {
    return;
  }
  float *out = vbo + 4 * idx;  // this boid's float4 slot
  out[0] = vel[idx].x + 0.3f;
  out[1] = vel[idx].y + 0.3f;
  out[2] = vel[idx].z + 0.3f;
  out[3] = 1.0f;
}
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
 * Pushes current positions and vel1 into the two mapped VBO pointers and
 * blocks until both copies finish.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 blocks((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO << <blocks, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <blocks, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
 * LOOK-1.2 - helper for kernUpdateVelocityBruteForce (__device__ code can be
 * called from a __global__ context).
 * Compute the new velocity for boid `iSelf` by applying the three classic
 * boids rules against all `N` boids (brute force):
 *   rule 1 (cohesion)  - steer toward the perceived local center of mass;
 *   rule 2 (separation) - steer away from boids closer than rule2Distance;
 *   rule 3 (alignment)  - steer toward the average neighbor velocity.
 *
 * Fix vs. the original: the displacement and its length are computed once
 * per neighbor instead of re-evaluating glm::length(pos[iBoid] - pos[iSelf])
 * separately for each of the three rules.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  glm::vec3 centerOfMass = glm::vec3(0.0f, 0.0f, 0.0f); //rule 1 accumulator
  glm::vec3 keepAway = glm::vec3(0.0f, 0.0f, 0.0f); //rule 2 accumulator
  glm::vec3 neighborVels = glm::vec3(0.0f, 0.0f, 0.0f); //rule 3 accumulator
  int cnt1 = 0;
  int cnt3 = 0;
  for (int iBoid = 0; iBoid < N; ++iBoid)
  {
    if (iBoid == iSelf) continue;
    // Hoisted: one subtraction + one length per neighbor.
    glm::vec3 offset = pos[iBoid] - pos[iSelf];
    float dist = glm::length(offset);
    // Rule 1: boids fly towards their local perceived center of mass
    if (dist < rule1Distance)
    {
      centerOfMass = centerOfMass + pos[iBoid];
      ++cnt1;
    }
    // Rule 2: boids try to stay a distance d away from each other
    if (dist < rule2Distance)
      keepAway = keepAway - offset;
    // Rule 3: boids try to match the speed of surrounding boids
    if (dist < rule3Distance)
    {
      neighborVels = neighborVels + vel[iBoid];
      ++cnt3;
    }
  }
  // Average the accumulators (guarding against empty neighborhoods).
  if (cnt1) centerOfMass = (centerOfMass / (float) cnt1 - pos[iSelf]) * rule1Scale;
  keepAway = keepAway * rule2Scale;
  if (cnt3) neighborVels = (neighborVels / (float) cnt3 - vel[iSelf]) * rule3Scale;
  return vel[iSelf] + centerOfMass + keepAway + neighborVels;
}
/**
 * Brute-force flocking step: derive each boid's next velocity from vel1 via
 * computeVelocityChange, clamp each component, and stage it in vel2.
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
  glm::vec3 *vel1, glm::vec3 *vel2) {
  int self = threadIdx.x + (blockIdx.x * blockDim.x);
  if (self >= N) {
    return;
  }
  // New velocity is based on the *previous* step's velocities (vel1).
  glm::vec3 v = computeVelocityChange(N, self, pos, vel1);
  // Clamp per component. Writing into vel2 rather than vel1 keeps this
  // step's reads unpolluted by this step's writes (ping-pong buffering).
  v.x = glm::clamp(v.x, minVel, maxVel);
  v.y = glm::clamp(v.y, minVel, maxVel);
  v.z = glm::clamp(v.z, minVel, maxVel);
  vel2[self] = v;
}
/**
 * LOOK-1.2 - For each of the `N` bodies, integrate its position by its
 * current velocity over `dt`, wrapping toroidally at +/- scene_scale so no
 * boid leaves the simulation volume.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int idx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (idx >= N) {
    return;
  }
  glm::vec3 p = pos[idx] + vel[idx] * dt;
  // Wrap each axis: fall off one side, reappear on the other.
  p.x = p.x < -scene_scale ? scene_scale : (p.x > scene_scale ? -scene_scale : p.x);
  p.y = p.y < -scene_scale ? scene_scale : (p.y > scene_scale ? -scene_scale : p.y);
  p.z = p.z < -scene_scale ? scene_scale : (p.z > scene_scale ? -scene_scale : p.z);
  pos[idx] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate into a 1D cell index; x varies fastest,
// then y, then z (same value as x + y*res + z*res*res).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
// Convert a world-space position into *fractional* grid coordinates,
// zero-indexed from gridMin. Callers truncate each component to get the
// integer cell index; the fractional part locates the boid within its cell.
__device__ glm::vec3 posToFloat3DIndex(glm::vec3 pos, glm::vec3 gridMin, float inverseCellWidth)
{
  glm::vec3 shifted = pos - gridMin;   // make all coordinates non-negative
  return shifted * inverseCellWidth;   // component-wise, same as the per-axis form
}
/**
 * Label each boid with the 1D index of the grid cell it occupies, and build
 * the parallel identity array of boid indices so the (cell, boid) pairs can
 * be sorted together by cell.
 */
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx >= N)
  {
    return;
  }
  // Truncate the fractional grid coordinates to get the integer cell.
  glm::vec3 cellF = posToFloat3DIndex(pos[idx], gridMin, inverseCellWidth);
  gridIndices[idx] = gridIndex3Dto1D((int)cellF.x, (int)cellF.y, (int)cellF.z, gridResolution);
  // indices[i] names the boid whose cell is gridIndices[i]; sorting both by
  // gridIndices yields the cell-grouped ordering used downstream.
  indices[idx] = idx;
}
// LOOK-2.1 - Fill an int buffer with `value`; used with -1 to mark grid
// cells that do not enclose any boids.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx >= N) {
    return;
  }
  intBuffer[idx] = value;
}
/**
 * Given the cell-sorted particleGridIndices array (one entry per particle,
 * N = particle count), record where each grid cell's run of particles starts
 * and ends. A run boundary is wherever the adjacent entry belongs to a
 * different cell (or at the array's ends) — a parallel unrolling of the
 * sequential "new cell starts here" loop.
 */
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx >= N)
  {
    return;
  }
  int cell = particleGridIndices[idx];
  // Short-circuiting keeps the idx-1 / idx+1 reads in bounds.
  bool firstOfRun = (idx == 0) || (particleGridIndices[idx - 1] != cell);
  bool lastOfRun = (idx == N - 1) || (particleGridIndices[idx + 1] != cell);
  if (firstOfRun)
  {
    gridCellStartIndices[cell] = idx;
  }
  if (lastOfRun)
  {
    gridCellEndIndices[cell] = idx;
  }
}
/**
 * Collect the 1D indices of the (up to 8) grid cells that could contain
 * boids within rule distance of `pos`, writing them into `neighbors` and
 * returning the count written.
 *
 * Because the cell width is twice the largest rule radius (see
 * initSimulation), a boid only ever needs its own cell plus one adjacent
 * cell per axis — 2x2x2 = 8 cells max, so `neighbors` must have room for
 * 8 ints. Cells outside the grid are skipped.
 */
__device__ int getNeighbors(glm::vec3 pos, float inverseCellWidth,
float cellWidth, int gridResolution, glm::vec3 gridMin, int * neighbors)
{
float halfWidth = cellWidth * 0.5f;
glm::vec3 myFloatGridPos = posToFloat3DIndex (pos, gridMin, inverseCellWidth);
// Per-axis offsets (in whole cells) of the search window around our own
// cell; exactly one of gridStart/gridEnd becomes nonzero per axis.
glm::vec3 gridStart = glm::vec3( 0.0f, 0.0f, 0.0f );
glm::vec3 gridEnd = glm::vec3( 0.0f, 0.0f, 0.0f );
//if adding a halfwidth results in the same tile, then they are in
// the lower half of the cell on that axis, so the relevant adjacent cell
// is at -1; otherwise they sit in the upper half and the neighbor is at +1.
if ((int)((pos.x - gridMin.x + halfWidth) * inverseCellWidth) == (int)myFloatGridPos.x)
gridStart.x = -1.0f ;
else
gridEnd.x = 1.0f ;
if ((int)((pos.y - gridMin.y + halfWidth) * inverseCellWidth) == (int)myFloatGridPos.y)
gridStart.y = -1.0f ;
else
gridEnd.y = 1.0f ;
if ((int)((pos.z - gridMin.z + halfWidth) * inverseCellWidth) == (int)myFloatGridPos.z)
gridStart.z = -1.0f ;
else
gridEnd.z = 1.0f ;
//calculate which cells are adjacent to me and put them in the buffer,
//clamping each axis to the grid bounds as we go
int neighborCnt = 0;
for (int i = (int)myFloatGridPos.x + (int)gridStart.x; i <= (int)myFloatGridPos.x + (int)gridEnd.x; ++i)
{
if (i < 0 || i >= gridResolution)
continue;
for (int j = (int)myFloatGridPos.y + (int)gridStart.y; j <= (int)myFloatGridPos.y + (int)gridEnd.y; ++j)
{
if (j < 0 || j >= gridResolution)
continue;
for (int k = (int)myFloatGridPos.z + (int)gridStart.z; k <= (int)myFloatGridPos.z + (int)gridEnd.z; ++k)
{
if (k < 0 || k >= gridResolution)
continue;
int neighborCell = gridIndex3Dto1D(i, j, k, gridResolution);
neighbors[neighborCnt] = neighborCell;
++ neighborCnt;
}
}
}
return neighborCnt ;
}
/**
 * Scattered uniform-grid flocking step: each thread handles one slot of the
 * cell-sorted particle array, resolves which boid it refers to through
 * particleArrayIndices, and accumulates the three boids rules over only the
 * boids in the (up to 8) neighboring grid cells.
 *
 * Fixes vs. the original:
 *  - empty cells (start/end sentinel -1 from kernResetIntBuffer) are now
 *    skipped; the original iterated from -1 to -1 and read
 *    particleArrayIndices[-1] out of bounds;
 *  - each neighbor's offset/distance is computed once instead of
 *    re-evaluating glm::length(...) for each of the three rules.
 */
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int particleNum = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (particleNum >= N)
  {
    return;
  }
  int myBoidIndex = particleArrayIndices[particleNum];
  // Cells (including our own) that could hold boids within rule distance.
  int neighbors[8];
  int neighborCnt = getNeighbors(pos[myBoidIndex],
    inverseCellWidth, cellWidth, gridResolution, gridMin, neighbors);
#if DEBUG
  if (myBoidIndex == 10) { for (int d = 0; d < neighborCnt; ++d) printf("neighbor %d = %d\n", d, neighbors[d]); }
#endif
  glm::vec3 centerOfMass = glm::vec3(0.0f, 0.0f, 0.0f); //rule 1 (cohesion)
  glm::vec3 keepAway = glm::vec3(0.0f, 0.0f, 0.0f); //rule 2 (separation)
  glm::vec3 neighborVels = glm::vec3(0.0f, 0.0f, 0.0f); //rule 3 (alignment)
  int cnt1 = 0;
  int cnt3 = 0;
  for (int i = 0; i < neighborCnt; ++i)
  {
    // For each cell, read the start/end indices in the boid pointer array.
    int currentCellIndex = neighbors[i];
    int startIndex = gridCellStartIndices[currentCellIndex];
    int endIndex = gridCellEndIndices[currentCellIndex];
    // Empty cell: sentinels are -1, nothing to scan (FIX: was an OOB read).
    if (startIndex < 0 || endIndex < 0) continue;
#if DEBUG
    if (myBoidIndex == 10) { printf("start %d end %d\n", startIndex, endIndex); }
#endif
    // Access each boid in the cell and apply the rules when it is within
    // the relevant neighborhood distance.
    for (int iterIndex = startIndex; iterIndex <= endIndex; ++iterIndex)
    {
      int neighborBoidIndex = particleArrayIndices[iterIndex];
      if (myBoidIndex == neighborBoidIndex) continue;
      glm::vec3 offset = pos[neighborBoidIndex] - pos[myBoidIndex];
      float dist = glm::length(offset);
      // Rule 1: boids fly towards their local perceived center of mass
      if (dist < rule1Distance)
      {
        centerOfMass = centerOfMass + pos[neighborBoidIndex];
        ++cnt1;
      }
      // Rule 2: boids try to stay a distance d away from each other
      if (dist < rule2Distance)
        keepAway = keepAway - offset;
      // Rule 3: boids try to match the speed of surrounding boids
      if (dist < rule3Distance)
      {
        neighborVels = neighborVels + vel1[neighborBoidIndex];
        ++cnt3;
      }
    }
  }
  //calculate averaged parameters
  if (cnt1) centerOfMass = (centerOfMass / (float)cnt1 - pos[myBoidIndex]) * rule1Scale;
  keepAway = keepAway * rule2Scale;
  if (cnt3) neighborVels = (neighborVels / (float)cnt3 - vel1[myBoidIndex]) * rule3Scale;
  glm::vec3 newVel = vel1[myBoidIndex] + centerOfMass + keepAway + neighborVels;
#if DEBUG
  if (myBoidIndex == 10){
    printf("my pos is %f %f %f\n", pos[10].x, pos[10].y, pos[10].z);
    printf("cnt1= %d, cnt3=%d\n", cnt1, cnt3);
    printf("newvel is %f %f %f\n", newVel.x, newVel.y, newVel.z);
  }
#endif
  // Clamp the speed change before putting the new speed in vel2
  newVel.x = glm::clamp(newVel.x, minVel, maxVel);
  newVel.y = glm::clamp(newVel.y, minVel, maxVel);
  newVel.z = glm::clamp(newVel.z, minVel, maxVel);
  vel2[myBoidIndex] = newVel;
}
/**
 * Coherent uniform-grid flocking step (TODO-2.3). Identical to the
 * scattered version except pos/vel1 have already been reshuffled into
 * cell-sorted order (kernRearrangeBoidData), so the cell start/end ranges
 * index pos and vel1 directly — one less level of indirection per neighbor
 * read.
 *
 * Fixes vs. the original:
 *  - empty cells (start/end sentinel -1) are skipped; the original scanned
 *    from index -1 and read pos[-1] out of bounds;
 *  - each neighbor's offset/distance is computed once instead of three times.
 */
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // - Identify the grid cell that this particle is in
  int particleIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (particleIndex >= N)
  {
    return;
  }
  // - Identify which cells may contain neighbors. This isn't always 8.
  int neighbors[8];
  int neighborCnt = getNeighbors(pos[particleIndex],
    inverseCellWidth, cellWidth, gridResolution, gridMin, neighbors);
#if DEBUG
  if (particleIndex == 10) { for (int d = 0; d < neighborCnt; ++d) printf("neighbor %d = %d\n", d, neighbors[d]); }
#endif
  glm::vec3 centerOfMass = glm::vec3(0.0f, 0.0f, 0.0f); //rule 1 (cohesion)
  glm::vec3 keepAway = glm::vec3(0.0f, 0.0f, 0.0f); //rule 2 (separation)
  glm::vec3 neighborVels = glm::vec3(0.0f, 0.0f, 0.0f); //rule 3 (alignment)
  int cnt1 = 0;
  int cnt3 = 0;
  for (int i = 0; i < neighborCnt; ++i)
  {
    // - For each cell, read the start/end indices in the boid data arrays.
    int currentCellIndex = neighbors[i];
    int startIndex = gridCellStartIndices[currentCellIndex];
    int endIndex = gridCellEndIndices[currentCellIndex];
    // Empty cell: sentinels are -1, nothing to scan (FIX: was an OOB read).
    if (startIndex < 0 || endIndex < 0) continue;
#if DEBUG
    if (particleIndex == 10) { printf("start %d end %d\n", startIndex, endIndex); }
#endif
    // - Access each boid in the cell directly (data is cell-coherent) and
    //   apply the rules when it is within the neighborhood distance.
    for (int neighborIndex = startIndex; neighborIndex <= endIndex; ++neighborIndex)
    {
      if (neighborIndex == particleIndex) continue;
      glm::vec3 offset = pos[neighborIndex] - pos[particleIndex];
      float dist = glm::length(offset);
      // Rule 1: boids fly towards their local perceived center of mass
      if (dist < rule1Distance)
      {
        centerOfMass = centerOfMass + pos[neighborIndex];
        ++cnt1;
      }
      // Rule 2: boids try to stay a distance d away from each other
      if (dist < rule2Distance)
        keepAway = keepAway - offset;
      // Rule 3: boids try to match the speed of surrounding boids
      if (dist < rule3Distance)
      {
        neighborVels = neighborVels + vel1[neighborIndex];
        ++cnt3;
      }
    }
  }
  //calculate averaged parameters
  if (cnt1) centerOfMass = (centerOfMass / (float)cnt1 - pos[particleIndex]) * rule1Scale;
  keepAway = keepAway * rule2Scale;
  if (cnt3) neighborVels = (neighborVels / (float)cnt3 - vel1[particleIndex]) * rule3Scale;
  glm::vec3 newVel = vel1[particleIndex] + centerOfMass + keepAway + neighborVels;
#if DEBUG
  if (particleIndex == 10){
    printf("my pos is %f %f %f\n", pos[10].x, pos[10].y, pos[10].z);
    printf("cnt1= %d, cnt3=%d\n", cnt1, cnt3);
    printf("newvel is %f %f %f\n", newVel.x, newVel.y, newVel.z);
  }
#endif
  // - Clamp the speed change before putting the new speed in vel2
  newVel.x = glm::clamp(newVel.x, minVel, maxVel);
  newVel.y = glm::clamp(newVel.y, minVel, maxVel);
  newVel.z = glm::clamp(newVel.z, minVel, maxVel);
  vel2[particleIndex] = newVel;
}
/**
 * Step the entire N-body simulation by `dt` seconds using the brute-force
 * O(N^2) velocity update, then integrate positions and ping-pong the
 * velocity buffers.
 */
void Boids::stepSimulationNaive(float dt) {
  dim3 blocks((numObjects + blockSize - 1) / blockSize);
#if PROFILE
  hipEventRecord(beginEvent, 0);
#endif
  // Velocity pass: reads vel1, writes vel2.
  hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , dim3(blocks), dim3(blockSize) , 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
#if PROFILE
  hipEventRecord(endEvent, 0);
  hipEventSynchronize(endEvent);
  hipEventElapsedTime(&searchAlgoTime, beginEvent, endEvent);
  std::cout << "search Time: " << searchAlgoTime << std::endl;
#endif
  // Position pass uses the freshly computed velocities.
  hipLaunchKernelGGL(( kernUpdatePos) , dim3(blocks), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong: next step reads what we just wrote.
  glm::vec3 *swapped = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = swapped;
}
/**
 * One simulation step using the scattered uniform grid: label boids with
 * cell indices, sort boid indices by cell (Thrust), mark each cell's
 * start/end range, then run the grid-accelerated velocity update, integrate
 * positions, and ping-pong the velocity buffers.
 *
 * Fix vs. the original: kernIdentifyCellStartEnd runs one thread per
 * *particle* (it is passed numObjects and guards on it), so it must be
 * launched with the particle-sized grid. The original launched it with
 * fullBlocksPerGridForCells, which under-launches and leaves cell ranges
 * unset whenever numObjects > gridCellCount.
 */
void Boids::stepSimulationScatteredGrid(float dt) {
  // Uniform Grid Neighbor search using Thrust sort.
  dim3 fullBlocksPerGrid = (numObjects + blockSize - 1) / blockSize;
  dim3 fullBlocksPerGridForCells = (gridCellCount + blockSize - 1) / blockSize;
#if DEBUG
  glm::vec3 pos[NUMBOIDS];
  if (printcnt < maxprints){
    hipMemcpy(pos, dev_pos, sizeof(glm::vec3) * NUMBOIDS, hipMemcpyDeviceToHost);
    std::cout << "positions: " << std::endl;
    for (int i = 0; i < NUMBOIDS; i++) {
      std::cout << " boid#: " << i;
      std::cout << " pos : " << pos[i].x << " " << pos[i].y << " " << pos[i].z << std::endl;
    }
  }
#endif
  // Label each particle with its array index as well as its grid index.
  hipLaunchKernelGGL(( kernComputeIndices) , dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust: groups boid indices by grid cell.
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
#if DEBUG
  int particleGridIndices[NUMBOIDS];
  int particleArrayIndices[NUMBOIDS];
  if (printcnt < maxprints){
    hipMemcpy(particleGridIndices, dev_particleGridIndices, sizeof(int) * NUMBOIDS, hipMemcpyDeviceToHost);
    hipMemcpy(particleArrayIndices, dev_particleArrayIndices, sizeof(int) * NUMBOIDS, hipMemcpyDeviceToHost);
    std::cout << "thrust: before unstable sort: " << std::endl;
    for (int i = 0; i < NUMBOIDS; i++) {
      std::cout << " key: " << particleGridIndices[i];
      std::cout << " value: " << particleArrayIndices[i] << std::endl;
    }
  }
#endif
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
    dev_thrust_particleArrayIndices);
#if DEBUG
  if (printcnt < maxprints){
    hipMemcpy(particleGridIndices, dev_particleGridIndices, sizeof(int) * NUMBOIDS, hipMemcpyDeviceToHost);
    hipMemcpy(particleArrayIndices, dev_particleArrayIndices, sizeof(int) * NUMBOIDS, hipMemcpyDeviceToHost);
    std::cout << "thrust: after unstable sort: " << std::endl;
    for (int i = 0; i < NUMBOIDS; i++) {
      std::cout << " key: " << particleGridIndices[i];
      std::cout << " value: " << particleArrayIndices[i] << std::endl;
    }
  }
#endif
  // Mark every cell empty (-1) before recomputing the start/end ranges.
  kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd1 failed!");
  kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd2 failed!");
  // Find the start/end indices of each cell's run in the sorted array.
  // FIX: one thread per particle, so launch with fullBlocksPerGrid (the
  // original used the cell-sized grid here).
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
#if DEBUG
  const int cells = 22 * 22 * 22;
  int gridCellStartIndices[cells];
  int gridCellEndIndices[cells];
  if (printcnt < maxprints){
    hipMemcpy(gridCellStartIndices, dev_gridCellStartIndices, sizeof(int) * cells, hipMemcpyDeviceToHost);
    hipMemcpy(gridCellEndIndices, dev_gridCellEndIndices, sizeof(int) * cells, hipMemcpyDeviceToHost);
    std::cout << "start/end results: " << std::endl;
    for (int i = 0; i < cells; i++) {
      if (gridCellStartIndices[i] == -1 && gridCellEndIndices[i] == -1) continue;
      if (gridCellStartIndices[i] != -1 && gridCellEndIndices[i] != -1){
        std::cout << " cell index: " << i;
        std::cout << " start: " << gridCellStartIndices[i];
        std::cout << " end: " << gridCellEndIndices[i] << std::endl;
      }
      else
      {
        std::cout << "PROBLEM cell index: " << i;
        std::cout << " start: " << gridCellStartIndices[i];
        std::cout << " end: " << gridCellEndIndices[i] << std::endl;
      }
    }
  }
#endif
#if PROFILE
  hipEventRecord(beginEvent, 0);
#endif
  // Perform velocity updates using grid-limited neighbor search.
  hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0,
    numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_particleArrayIndices,
    dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
#if PROFILE
  hipEventRecord(endEvent, 0);
  hipEventSynchronize(endEvent);
  hipEventElapsedTime(&searchAlgoTime, beginEvent, endEvent);
  std::cout << "search Time: " << searchAlgoTime << std::endl;
#endif
  // Update positions from the freshly computed velocities.
  hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong the velocity buffers.
  glm::vec3 *tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
#if DEBUG
  printcnt++;
#endif
}
/**
 * Gather pos/vel into cell-sorted order for the coherent grid path:
 * slot `i` of the ordered buffers receives the data of boid ordering[i]
 * (the sorted particleArrayIndices array).
 */
__global__ void kernRearrangeBoidData(
  int N, int *ordering,
  glm::vec3 *originalPos, glm::vec3 *orderedPos,
  glm::vec3 *originalVel, glm::vec3 *orderedVel) {
  int dst = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (dst >= N)
  {
    return;
  }
  // Boid whose data belongs at slot dst in the cell-coherent layout.
  int src = ordering[dst];
  orderedPos[dst] = originalPos[src];
  orderedVel[dst] = originalVel[src];
}
/**
 * Scatter velocities back from cell-sorted order to original boid order:
 * slot `i` of the ordered buffer is written to originalVel[ordering[i]].
 * Inverse of the gather done by kernRearrangeBoidData.
 */
__global__ void kernReplaceBoidVelData(
  int N, int *ordering,
  glm::vec3 *originalVel, glm::vec3 *orderedVel) {
  int slot = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (slot >= N)
  {
    return;
  }
  // ordering[slot] names the boid whose velocity lives at this sorted slot.
  originalVel[ordering[slot]] = orderedVel[slot];
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // One coherent-grid simulation step. Pipeline:
  //   label boids by cell -> sort by cell key -> build cell start/end tables
  //   -> reshuffle pos/vel into cell order -> coherent velocity update
  //   -> scatter velocities back to original slots -> integrate positions.
  dim3 fullBlocksPerGrid = (numObjects + blockSize - 1) / blockSize;
  dim3 fullBlocksPerGridForCells = (gridCellCount + blockSize - 1) / blockSize;
  // Label each particle with its array index as well as its grid-cell index.
  hipLaunchKernelGGL((kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust: order boid indices by their grid cell.
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
    dev_thrust_particleArrayIndices);
  // Mark every cell empty (-1) before rebuilding the start/end tables.
  kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer (start indices) failed!");
  kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer (end indices) failed!");
  // Find the start/end index of each cell's contiguous run in the sorted array.
  kernIdentifyCellStartEnd << <fullBlocksPerGridForCells, blockSize >> > (numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // BIG DIFFERENCE vs. the scattered version: reshuffle pos/vel into
  // cell-coherent order so the search kernel reads memory contiguously.
  kernRearrangeBoidData << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices,
    dev_pos, dev_orderedPos, dev_vel1, dev_orderedVel);
  checkCUDAErrorWithLine("kernRearrangeBoidData failed!");
#if DEBUG
  int particleGridIndices[NUMBOIDS];
  int particleArrayIndices[NUMBOIDS];
  glm::vec3 originalpos[NUMBOIDS];
  glm::vec3 orderedpos[NUMBOIDS];
  glm::vec3 originalvel[NUMBOIDS];
  glm::vec3 orderedvel[NUMBOIDS];
  if (printcnt < maxprints){
    hipMemcpy(particleGridIndices, dev_particleGridIndices, sizeof(int) * NUMBOIDS, hipMemcpyDeviceToHost);
    hipMemcpy(particleArrayIndices, dev_particleArrayIndices, sizeof(int) * NUMBOIDS, hipMemcpyDeviceToHost);
    hipMemcpy(originalpos, dev_pos, sizeof(glm::vec3) * NUMBOIDS, hipMemcpyDeviceToHost);
    hipMemcpy(orderedpos, dev_orderedPos, sizeof(glm::vec3) * NUMBOIDS, hipMemcpyDeviceToHost);
    hipMemcpy(originalvel, dev_vel1, sizeof(glm::vec3) * NUMBOIDS, hipMemcpyDeviceToHost);
    hipMemcpy(orderedvel, dev_orderedVel, sizeof(glm::vec3) * NUMBOIDS, hipMemcpyDeviceToHost);
    std::cout << "PARTICLES: " << std::endl;
    for (int i = 0; i < NUMBOIDS; i++) {
      std::cout << " particle index: " << i;
      std::cout << " original boid index: " << particleArrayIndices[i];
      std::cout << " grid index: " << particleGridIndices[i];
      std::cout << " pos in original: " << originalpos[particleArrayIndices[i]].x << originalpos[particleArrayIndices[i]].y << originalpos[particleArrayIndices[i]].z;
      std::cout << " pos in reordered: " << orderedpos[i].x << orderedpos[i].y << orderedpos[i].z;
      std::cout << " vel in original: " << originalvel[particleArrayIndices[i]].x << originalvel[particleArrayIndices[i]].y << originalvel[particleArrayIndices[i]].z;
      std::cout << " vel in reordered: " << orderedvel[i].x << orderedvel[i].y << orderedvel[i].z << std::endl;
    }
  }
#endif
#if PROFILE
  hipEventRecord(beginEvent, 0);
#endif
  // Perform velocity updates using the coherent neighbor search.
  kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_orderedPos, dev_orderedVel, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
#if PROFILE
  hipEventRecord(endEvent, 0);
  hipEventSynchronize(endEvent);
  // BUG FIX: the elapsed time was stored in randomPosKernelTime while the
  // stale searchAlgoTime was printed; store and print the same variable.
  hipEventElapsedTime(&searchAlgoTime, beginEvent, endEvent);
  std::cout << "search Time: " << searchAlgoTime << std::endl;
#endif
  // Scatter the updated (coherent-order) velocities back into dev_vel1 at
  // their original boid slots.
  kernReplaceBoidVelData << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices,
    dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernReplaceBoidVelData failed!");
  // Integrate positions with the new velocities.
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // No ping-pong needed: dev_vel1 already holds the updated velocities in the
  // original ordering.
#if DEBUG
  printcnt++;
#endif
}
void Boids::endSimulation() {
  // Release every device allocation made in Boids::initSimulation.
  // Core simulation buffers:
  hipFree(dev_pos);
  hipFree(dev_vel1);
  hipFree(dev_vel2);
  // Uniform-grid bookkeeping buffers:
  hipFree(dev_particleArrayIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_gridCellEndIndices);
  // Coherent-grid reshuffle buffers:
  hipFree(dev_orderedPos);
  hipFree(dev_orderedVel);
#if PROFILE
  // Tear down the timing events created in initSimulation.
  hipEventDestroy(beginEvent);
  hipEventDestroy(endEvent);
#endif
}
void Boids::unitTest() {
  // LOOK-1.2 sanity check for thrust::sort_by_key: sort a small key/value set
  // on the device and print before/after so the pairing can be inspected.
  int *dev_intKeys;
  int *dev_intValues;
  const int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  // Keys mimic unsorted grid-cell ids; values are the original indices 0..N-1.
  const int keyInit[N] = { 0, 1, 0, 3, 0, 2, 2, 0, 5, 6 };
  for (int i = 0; i < N; i++) {
    intKeys[i] = keyInit[i];
    intValues[i] = i;
  }
  hipMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
  hipMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
  // (Removed an unused `dim3 fullBlocksPerGrid` local — no kernel is launched here.)
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
  // Wrap device pointers in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup
  delete[] intKeys;
  delete[] intValues;
  hipFree(dev_intKeys);
  hipFree(dev_intValues);
  checkCUDAErrorWithLine("hipFree failed!");
  return;
}
| 55ea9bdd4245445e5883bc66d0502bd5d6c06f3b.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define DEBUG 0
#define PROFILE 1
#if PROFILE
//Events for timing analysis
cudaEvent_t beginLoop;
cudaEvent_t endLoop;
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
//event time records
float randomPosKernelTime;
float searchAlgoTime;
#endif
#if DEBUG
#define NUMBOIDS 10
int printcnt = 0;
int maxprints = 4;
#endif
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
  // Report the most recent CUDA runtime error (if any) and abort the program.
  cudaError_t status = cudaGetLastError();
  if (status == cudaSuccess) {
    return;
  }
  // Prefix with the source line when the caller supplied one (>= 0).
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
#define maxVel 1.0f
#define minVel -1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_orderedPos;
glm::vec3 *dev_orderedVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer bit-mixing hash: scrambles the input through a fixed sequence of
// add/shift/xor rounds so nearby seeds produce very different outputs.
// Callable from both host and device; used to seed the per-boid RNG.
__host__ __device__ unsigned int hash(unsigned int a) {
  unsigned int key = a;
  key = (key + 0x7ed55d16) + (key << 12);
  key = (key ^ 0xc761c23c) ^ (key >> 19);
  key = (key + 0x165667b1) + (key << 5);
  key = (key + 0xd3a2646c) ^ (key << 9);
  key = (key + 0xfd7046c5) + (key << 3);
  key = (key ^ 0xb55a4f09) ^ (key >> 16);
  return key;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  // Deterministic per-(time, index) pseudo-random vec3, components in [-1, 1].
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  // Draw the three components as separate statements: C++ leaves the
  // evaluation order of constructor arguments unspecified, so the original
  // one-liner could assign the three draws to different axes depending on
  // the compiler. Sequencing them pins x, y, z to the 1st, 2nd, 3rd draw.
  float x = (float)unitDistrib(rng);
  float y = (float)unitDistrib(rng);
  float z = (float)unitDistrib(rng);
  return glm::vec3(x, y, z);
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  // One thread per boid: seed its position with a scaled random vector.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 rand = generateRandomVec3(time, index);
  arr[index].x = scale * rand.x;
  arr[index].y = scale * rand.y;
  arr[index].z = scale * rand.z;
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
  // Allocate all device buffers, seed random boid positions, and derive the
  // uniform-grid parameters. Everything allocated here is freed in
  // Boids::endSimulation.
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
#if PROFILE
  cudaEventCreate(&beginEvent);
  cudaEventCreate(&endEvent);
#endif
  // Core simulation buffers: positions plus ping-pong velocity pair.
  cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
  cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
  cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
#if PROFILE
  cudaEventRecord(beginEvent, 0);
#endif
  // Randomize initial positions across the scene.
  kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
#if PROFILE
  cudaEventRecord(endEvent, 0);
  cudaEventSynchronize(endEvent);
  cudaEventElapsedTime(&randomPosKernelTime, beginEvent, endEvent);
  std::cout << "pos init Time: " << randomPosKernelTime << std::endl;
#endif
  // Grid parameters: cell width is twice the largest rule radius, so a boid's
  // neighborhood spans at most two cells per axis (a 2x2x2 block).
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // Uniform-grid bookkeeping and coherent-grid reshuffle buffers.
  cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
  cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
  cudaMalloc((void**)&dev_orderedPos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_orderedPos failed!");
  cudaMalloc((void**)&dev_orderedVel, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_orderedVel failed!");
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent and blocks until all preceding work completes.
  cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  // Pack scaled positions into the float4-per-boid OpenGL VBO.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  const float c_scale = -1.0f / s_scale;
  float *out = vbo + 4 * index;
  out[0] = pos[index].x * c_scale;
  out[1] = pos[index].y * c_scale;
  out[2] = pos[index].z * c_scale;
  out[3] = 1.0f;
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  // Pack velocities (offset by 0.3 so colors stay visible) into the VBO.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float *out = vbo + 4 * index;
  out[0] = vel[index].x + 0.3f;
  out[1] = vel[index].y + 0.3f;
  out[2] = vel[index].z + 0.3f;
  out[3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  // Copy boid positions and velocities into the OpenGL VBOs for rendering.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent and blocks until both copy kernels finish.
  cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  // Compute the new velocity for boid `iSelf` from the three classic rules,
  // checking every other boid (brute force, O(N) per boid):
  //   rule 1 (cohesion)  - steer toward neighbors' perceived center of mass
  //   rule 2 (separation) - steer away from very close neighbors
  //   rule 3 (alignment)  - match nearby neighbors' velocities
  glm::vec3 centerOfMass = glm::vec3(0.0f, 0.0f, 0.0f); // rule 1 accumulator
  glm::vec3 keepAway = glm::vec3(0.0f, 0.0f, 0.0f);     // rule 2 accumulator
  glm::vec3 neighborVels = glm::vec3(0.0f, 0.0f, 0.0f); // rule 3 accumulator
  int cnt1 = 0; // neighbors contributing to rule 1
  int cnt3 = 0; // neighbors contributing to rule 3
  for (int iBoid = 0; iBoid < N; ++iBoid)
  {
    if (iBoid == iSelf) continue;
    // Hoist the offset and distance: the original recomputed
    // glm::length(pos[iBoid] - pos[iSelf]) once per rule (3x per neighbor).
    glm::vec3 offset = pos[iBoid] - pos[iSelf];
    float dist = glm::length(offset);
    if (dist < rule1Distance)
    {
      centerOfMass = centerOfMass + pos[iBoid];
      ++cnt1;
    }
    if (dist < rule2Distance)
      keepAway = keepAway - offset;
    if (dist < rule3Distance)
    {
      neighborVels = neighborVels + vel[iBoid];
      ++cnt3;
    }
  }
  // Average the accumulators (only when at least one neighbor contributed).
  if (cnt1) centerOfMass = (centerOfMass / (float) cnt1 - pos[iSelf]) * rule1Scale;
  keepAway = keepAway * rule2Scale;
  if (cnt3) neighborVels = (neighborVels / (float) cnt3 - vel[iSelf]) * rule3Scale;
  return vel[iSelf] + centerOfMass + keepAway + neighborVels;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
  glm::vec3 *vel1, glm::vec3 *vel2) {
  // One thread per boid: compute its flocking velocity against ALL boids.
  int self = threadIdx.x + (blockIdx.x * blockDim.x);
  if (self >= N) {
    return;
  }
  glm::vec3 updated = computeVelocityChange(N, self, pos, vel1);
  // Clamp each component to the allowed velocity range.
  updated.x = glm::clamp(updated.x, minVel, maxVel);
  updated.y = glm::clamp(updated.y, minVel, maxVel);
  updated.z = glm::clamp(updated.z, minVel, maxVel);
  // Write into vel2, not vel1: every thread must see the PREVIOUS step's
  // velocities, so vel1 stays untouched until the host ping-pongs the buffers.
  vel2[self] = updated;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  // Advance each boid one Euler step and wrap it back into the scene cube.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  // Toroidal wrap: a boid leaving one face re-enters at the opposite face.
  if (p.x < -scene_scale) p.x = scene_scale; else if (p.x > scene_scale) p.x = -scene_scale;
  if (p.y < -scene_scale) p.y = scene_scale; else if (p.y > scene_scale) p.y = -scene_scale;
  if (p.z < -scene_scale) p.z = scene_scale; else if (p.z > scene_scale) p.z = -scene_scale;
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate to a 1D index. x varies fastest, z slowest —
// so iterating z in the outermost loop walks memory contiguously.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return ((z * gridResolution) + y) * gridResolution + x;
}
__device__ glm::vec3 posToFloat3DIndex(glm::vec3 pos, glm::vec3 gridMin, float inverseCellWidth)
{
  // Shift into zero-based grid space and scale to cell units. glm vector
  // arithmetic is componentwise, so this performs exactly
  // (pos.c - gridMin.c) * inverseCellWidth per axis.
  // NOTE: the result is still fractional; callers truncate to get cell coords.
  return (pos - gridMin) * inverseCellWidth;
}
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  // One thread per boid: record which grid cell it occupies, and set up the
  // parallel array of boid indices that will be sorted alongside the cells.
  int boid = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (boid >= N)
  {
    return;
  }
  // Truncate the fractional grid coordinates to get integer cell coords.
  glm::vec3 cellCoord = posToFloat3DIndex(pos[boid], gridMin, inverseCellWidth);
  gridIndices[boid] = gridIndex3Dto1D((int)cellCoord.x, (int)cellCoord.y, (int)cellCoord.z, gridResolution);
  // indices[i] starts as the identity mapping; after sort_by_key it maps a
  // sorted slot back to the boid's position in dev_pos / dev_velX.
  indices[boid] = boid;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  // Fill intBuffer[0..N) with `value` (used with -1 to mark cells as empty).
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  // particleGridIndices is sorted, so each cell's boids form a contiguous run.
  // A thread whose key differs from its left/right neighbor marks the run's
  // start/end — a parallel unrolling of "new key means new cell".
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N)
  {
    return;
  }
  int cell = particleGridIndices[index];
  // Short-circuit keeps the index-1 / index+1 reads in bounds.
  bool firstOfCell = (index == 0) || (particleGridIndices[index - 1] != cell);
  bool lastOfCell = (index == N - 1) || (particleGridIndices[index + 1] != cell);
  if (firstOfCell)
  {
    gridCellStartIndices[cell] = index;
  }
  if (lastOfCell)
  {
    gridCellEndIndices[cell] = index;
  }
}
// Fills `neighbors` with the 1D indices of the grid cells (at most 8) that can
// contain boids within one rule radius of `pos`, and returns how many were
// written. Cell width is 2x the largest rule radius, so only a 2x2x2 block of
// cells around the boid needs checking.
__device__ int getNeighbors(glm::vec3 pos, float inverseCellWidth,
float cellWidth, int gridResolution, glm::vec3 gridMin, int * neighbors)
{
float halfWidth = cellWidth * 0.5f;
// Fractional (float) grid coordinates of this boid.
glm::vec3 myFloatGridPos = posToFloat3DIndex (pos, gridMin, inverseCellWidth);
// Per-axis range offsets relative to my own cell:
// gridStart.c == -1 means "also visit the previous cell on axis c";
// gridEnd.c == 1 means "also visit the next cell".
glm::vec3 gridStart = glm::vec3( 0.0f, 0.0f, 0.0f );
glm::vec3 gridEnd = glm::vec3( 0.0f, 0.0f, 0.0f );
//if adding a halfwidth results in the same tile, then they are in
// the lower half of the cell, so the previous cell is within reach;
// otherwise the next cell is.
// NOTE(review): this relies on float->int truncation of non-negative grid
// coordinates — confirm `pos` is always inside the grid bounds.
if ((int)((pos.x - gridMin.x + halfWidth) * inverseCellWidth) == (int)myFloatGridPos.x)
gridStart.x = -1.0f ;
else
gridEnd.x = 1.0f ;
if ((int)((pos.y - gridMin.y + halfWidth) * inverseCellWidth) == (int)myFloatGridPos.y)
gridStart.y = -1.0f ;
else
gridEnd.y = 1.0f ;
if ((int)((pos.z - gridMin.z + halfWidth) * inverseCellWidth) == (int)myFloatGridPos.z)
gridStart.z = -1.0f ;
else
gridEnd.z = 1.0f ;
//calculate which cells are adjacent to me and put them in the buffer
int neighborCnt = 0;
for (int i = (int)myFloatGridPos.x + (int)gridStart.x; i <= (int)myFloatGridPos.x + (int)gridEnd.x; ++i)
{
// skip cells that fall outside the grid on this axis
if (i < 0 || i >= gridResolution)
continue;
for (int j = (int)myFloatGridPos.y + (int)gridStart.y; j <= (int)myFloatGridPos.y + (int)gridEnd.y; ++j)
{
if (j < 0 || j >= gridResolution)
continue;
for (int k = (int)myFloatGridPos.z + (int)gridStart.z; k <= (int)myFloatGridPos.z + (int)gridEnd.z; ++k)
{
if (k < 0 || k >= gridResolution)
continue;
int neighborCell = gridIndex3Dto1D(i, j, k, gridResolution);
neighbors[neighborCnt] = neighborCell;
++ neighborCnt;
}
}
}
return neighborCnt ;
}
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Velocity update using the uniform grid: only boids in the (<= 8)
  // neighboring cells are examined, reached indirectly through
  // particleArrayIndices (the "scattered" variant).
  int particleNum = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (particleNum >= N)
  {
    return;
  }
  int myBoidIndex = particleArrayIndices[particleNum];
  // Cells that can contain boids within the largest rule radius.
  int neighbors[8];
  int neighborCnt = getNeighbors(pos[myBoidIndex],
    inverseCellWidth, cellWidth, gridResolution, gridMin, neighbors);
#if DEBUG
  if (myBoidIndex == 10) { for (int d = 0; d < neighborCnt; ++d) printf("neighbor %d = %d\n", d, neighbors[d]); }
#endif
  glm::vec3 centerOfMass = glm::vec3(0.0f, 0.0f, 0.0f); // rule 1: cohesion
  glm::vec3 keepAway = glm::vec3(0.0f, 0.0f, 0.0f);     // rule 2: separation
  glm::vec3 neighborVels = glm::vec3(0.0f, 0.0f, 0.0f); // rule 3: alignment
  int cnt1 = 0;
  int cnt3 = 0;
  for (int i = 0; i < neighborCnt; ++i)
  {
    // Read the cell's start/end range in the sorted boid-index array.
    int currentCellIndex = neighbors[i];
    int startIndex = gridCellStartIndices[currentCellIndex];
    int endIndex = gridCellEndIndices[currentCellIndex];
#if DEBUG
    if (myBoidIndex == 10) { printf("start %d end %d\n", startIndex, endIndex); }
#endif
    // BUG FIX: empty cells keep the -1 sentinel in BOTH tables, and the
    // original loop (for iter = -1; iter <= -1) then read
    // particleArrayIndices[-1] out of bounds. Skip empty cells explicitly.
    if (startIndex < 0)
    {
      continue;
    }
    // Apply the boids rules to every boid in this cell.
    for (int iterIndex = startIndex; iterIndex <= endIndex; ++iterIndex)
    {
      int neighborBoidIndex = particleArrayIndices[iterIndex];
      if (myBoidIndex == neighborBoidIndex) continue;
      // Hoisted: the original recomputed glm::length three times per boid.
      glm::vec3 offset = pos[neighborBoidIndex] - pos[myBoidIndex];
      float dist = glm::length(offset);
      if (dist < rule1Distance)
      {
        centerOfMass = centerOfMass + pos[neighborBoidIndex];
        ++cnt1;
      }
      if (dist < rule2Distance)
        keepAway = keepAway - offset;
      if (dist < rule3Distance)
      {
        neighborVels = neighborVels + vel1[neighborBoidIndex];
        ++cnt3;
      }
    }
  }
  // Average the accumulators (only when neighbors contributed).
  if (cnt1) centerOfMass = (centerOfMass / (float)cnt1 - pos[myBoidIndex]) * rule1Scale;
  keepAway = keepAway * rule2Scale;
  if (cnt3) neighborVels = (neighborVels / (float)cnt3 - vel1[myBoidIndex]) * rule3Scale;
  glm::vec3 newVel = vel1[myBoidIndex] + centerOfMass + keepAway + neighborVels;
#if DEBUG
  if (myBoidIndex == 10){
    printf("my pos is %f %f %f\n", pos[10].x, pos[10].y, pos[10].z);
    printf("cnt1= %d, cnt3=%d\n", cnt1, cnt3);
    printf("newvel is %f %f %f\n", newVel.x, newVel.y, newVel.z);
  }
#endif
  // Clamp per component; write to vel2 so vel1 stays intact for other threads.
  newVel.x = glm::clamp(newVel.x, minVel, maxVel);
  newVel.y = glm::clamp(newVel.y, minVel, maxVel);
  newVel.z = glm::clamp(newVel.z, minVel, maxVel);
  vel2[myBoidIndex] = newVel;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Like kernUpdateVelNeighborSearchScattered, but with one less level of
  // indirection: gridCellStartIndices/EndIndices refer DIRECTLY into the
  // cell-coherent pos and vel1 arrays.
  int particleIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (particleIndex >= N)
  {
    return;
  }
  // Cells that can contain boids within the largest rule radius.
  int neighbors[8];
  int neighborCnt = getNeighbors(pos[particleIndex],
    inverseCellWidth, cellWidth, gridResolution, gridMin, neighbors);
#if DEBUG
  if (particleIndex == 10) { for (int d = 0; d < neighborCnt; ++d) printf("neighbor %d = %d\n", d, neighbors[d]); }
#endif
  glm::vec3 centerOfMass = glm::vec3(0.0f, 0.0f, 0.0f); // rule 1: cohesion
  glm::vec3 keepAway = glm::vec3(0.0f, 0.0f, 0.0f);     // rule 2: separation
  glm::vec3 neighborVels = glm::vec3(0.0f, 0.0f, 0.0f); // rule 3: alignment
  int cnt1 = 0;
  int cnt3 = 0;
  for (int i = 0; i < neighborCnt; ++i)
  {
    int currentCellIndex = neighbors[i];
    int startIndex = gridCellStartIndices[currentCellIndex];
    int endIndex = gridCellEndIndices[currentCellIndex];
#if DEBUG
    if (particleIndex == 10) { printf("start %d end %d\n", startIndex, endIndex); }
#endif
    // BUG FIX: empty cells keep the -1 sentinel in BOTH tables, and the
    // original loop (for idx = -1; idx <= -1) then read pos[-1] out of
    // bounds. Skip empty cells explicitly.
    if (startIndex < 0)
    {
      continue;
    }
    // Boids in this cell are contiguous in pos/vel1 — coherent reads.
    for (int neighborIndex = startIndex; neighborIndex <= endIndex; ++neighborIndex)
    {
      if (neighborIndex == particleIndex) continue;
      // Hoisted: the original recomputed glm::length three times per boid.
      glm::vec3 offset = pos[neighborIndex] - pos[particleIndex];
      float dist = glm::length(offset);
      if (dist < rule1Distance)
      {
        centerOfMass = centerOfMass + pos[neighborIndex];
        ++cnt1;
      }
      if (dist < rule2Distance)
        keepAway = keepAway - offset;
      if (dist < rule3Distance)
      {
        neighborVels = neighborVels + vel1[neighborIndex];
        ++cnt3;
      }
    }
  }
  // Average the accumulators (only when neighbors contributed).
  if (cnt1) centerOfMass = (centerOfMass / (float)cnt1 - pos[particleIndex]) * rule1Scale;
  keepAway = keepAway * rule2Scale;
  if (cnt3) neighborVels = (neighborVels / (float)cnt3 - vel1[particleIndex]) * rule3Scale;
  glm::vec3 newVel = vel1[particleIndex] + centerOfMass + keepAway + neighborVels;
#if DEBUG
  if (particleIndex == 10){
    printf("my pos is %f %f %f\n", pos[10].x, pos[10].y, pos[10].z);
    printf("cnt1= %d, cnt3=%d\n", cnt1, cnt3);
    printf("newvel is %f %f %f\n", newVel.x, newVel.y, newVel.z);
  }
#endif
  // Clamp per component before writing the coherent-order result to vel2.
  newVel.x = glm::clamp(newVel.x, minVel, maxVel);
  newVel.y = glm::clamp(newVel.y, minVel, maxVel);
  newVel.z = glm::clamp(newVel.z, minVel, maxVel);
  vel2[particleIndex] = newVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
  // Brute-force O(N^2) step: velocity update against all boids, then integrate.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
#if PROFILE
  cudaEventRecord(beginEvent, 0);
#endif
  kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
#if PROFILE
  cudaEventRecord(endEvent, 0);
  cudaEventSynchronize(endEvent);
  cudaEventElapsedTime(&searchAlgoTime, beginEvent, endEvent);
  std::cout << "search Time: " << searchAlgoTime << std::endl;
#endif
  kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong: the freshly written vel2 becomes the next step's vel1.
  glm::vec3 *swapped = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = swapped;
}
// One scattered-grid simulation step: label boids by cell, sort, build the
// per-cell start/end tables, run the indirected neighbor search, integrate,
// then ping-pong the velocity buffers.
void Boids::stepSimulationScatteredGrid(float dt) {
// Uniform Grid Neighbor search using Thrust sort.
dim3 fullBlocksPerGrid = (numObjects + blockSize - 1) / blockSize;
dim3 fullBlocksPerGridForCells = (gridCellCount + blockSize - 1) / blockSize;
#if DEBUG
glm::vec3 pos[NUMBOIDS];
if (printcnt < maxprints){
cudaMemcpy(pos, dev_pos, sizeof(glm::vec3) * NUMBOIDS, cudaMemcpyDeviceToHost);
std::cout << "positions: " << std::endl;
for (int i = 0; i < NUMBOIDS; i++) {
std::cout << " boid#: " << i;
std::cout << " pos : " << pos[i].x << " " << pos[i].y << " " << pos[i].z << std::endl;
}
}
#endif
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
kernComputeIndices <<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
#if DEBUG
int particleGridIndices[NUMBOIDS];
int particleArrayIndices[NUMBOIDS];
if (printcnt < maxprints){
cudaMemcpy(particleGridIndices, dev_particleGridIndices, sizeof(int) * NUMBOIDS, cudaMemcpyDeviceToHost);
cudaMemcpy(particleArrayIndices, dev_particleArrayIndices, sizeof(int) * NUMBOIDS, cudaMemcpyDeviceToHost);
std::cout << "thrust: before unstable sort: " << std::endl;
for (int i = 0; i < NUMBOIDS; i++) {
std::cout << " key: " << particleGridIndices[i];
std::cout << " value: " << particleArrayIndices[i] << std::endl;
}
}
#endif
// Sort boid indices by their grid-cell key so each cell's boids are
// contiguous in dev_particleArrayIndices.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
#if DEBUG
if (printcnt < maxprints){
cudaMemcpy(particleGridIndices, dev_particleGridIndices, sizeof(int) * NUMBOIDS, cudaMemcpyDeviceToHost);
cudaMemcpy(particleArrayIndices, dev_particleArrayIndices, sizeof(int) * NUMBOIDS, cudaMemcpyDeviceToHost);
std::cout << "thrust: after unstable sort: " << std::endl;
for (int i = 0; i < NUMBOIDS; i++) {
std::cout << " key: " << particleGridIndices[i];
std::cout << " value: " << particleArrayIndices[i] << std::endl;
}
}
#endif
// Mark every cell empty (-1) before rebuilding the start/end tables.
kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd1 failed!");
kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd2 failed!");
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
kernIdentifyCellStartEnd << <fullBlocksPerGridForCells, blockSize >> > (numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
#if DEBUG
// NOTE(review): 22*22*22 hard-codes gridSideCount for the default scene —
// confirm it matches gridCellCount if scene_scale or the rule radii change.
const int cells = 22 * 22 * 22;
int gridCellStartIndices[cells];
int gridCellEndIndices[cells];
if (printcnt < maxprints){
cudaMemcpy(gridCellStartIndices, dev_gridCellStartIndices, sizeof(int) * cells, cudaMemcpyDeviceToHost);
cudaMemcpy(gridCellEndIndices, dev_gridCellEndIndices, sizeof(int) * cells, cudaMemcpyDeviceToHost);
std::cout << "start/end results: " << std::endl;
for (int i = 0; i < cells; i++) {
// A cell is either fully empty (both -1) or fully populated; anything
// else indicates a broken start/end table.
if (gridCellStartIndices[i] == -1 && gridCellEndIndices[i] == -1) continue;
if (gridCellStartIndices[i] != -1 && gridCellEndIndices[i] != -1){
std::cout << " cell index: " << i;
std::cout << " start: " << gridCellStartIndices[i];
std::cout << " end: " << gridCellEndIndices[i] << std::endl;
}
else
{
std::cout << "PROBLEM cell index: " << i;
std::cout << " start: " << gridCellStartIndices[i];
std::cout << " end: " << gridCellEndIndices[i] << std::endl;
}
}
}
#endif
#if PROFILE
cudaEventRecord(beginEvent, 0);
#endif
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered <<<fullBlocksPerGrid, blockSize >>> (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
#if PROFILE
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&searchAlgoTime, beginEvent, endEvent);
std::cout << "search Time: " << searchAlgoTime << std::endl;
#endif
// - Update positions using the freshly computed velocities in dev_vel2.
kernUpdatePos <<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers: dev_vel2 becomes next step's dev_vel1.
glm::vec3 *tmp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = tmp;
#if DEBUG
printcnt++;
#endif
}
// Gather kernel: copies each boid's position and velocity into cell-coherent
// order. ordering[i] holds the original index of the boid that belongs at
// slot i of the ordered buffers. One thread per boid (1D launch over N).
__global__ void kernRearrangeBoidData(
  int N, int *ordering,
  glm::vec3 *originalPos, glm::vec3 *orderedPos,
  glm::vec3 *originalVel, glm::vec3 *orderedVel) {
  const int dst = blockIdx.x * blockDim.x + threadIdx.x;
  if (dst < N) {
    const int src = ordering[dst];  // original slot of this boid's data
    orderedPos[dst] = originalPos[src];
    orderedVel[dst] = originalVel[src];
  }
}
// Scatter kernel: writes the updated (cell-coherent-order) velocities back
// into their original boid slots. Inverse of the gather performed by
// kernRearrangeBoidData. One thread per boid (1D launch over N).
__global__ void kernReplaceBoidVelData(
  int N, int *ordering,
  glm::vec3 *originalVel, glm::vec3 *orderedVel) {
  const int src = blockIdx.x * blockDim.x + threadIdx.x;
  if (src < N) {
    // ordering[src] is the boid's index in the original (unsorted) buffers.
    originalVel[ordering[src]] = orderedVel[src];
  }
}
// Advance the flocking simulation one timestep using the cell-coherent
// uniform grid: boid data is gathered into grid-cell order before the
// neighbor search so that position/velocity reads inside the search kernel
// are memory-coherent.
void Boids::stepSimulationCoherentGrid(float dt) {
  dim3 fullBlocksPerGrid = (numObjects + blockSize - 1) / blockSize;
  dim3 fullBlocksPerGridForCells = (gridCellCount + blockSize - 1) / blockSize;
  // Label each particle with its array index as well as its grid-cell index.
  kernComputeIndices <<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust: order boid indices by grid-cell key.
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
    dev_thrust_particleArrayIndices);
  // Reset cell start/end markers to -1 ("empty cell") before rebuilding them.
  // FIX: these error messages previously misattributed failures to
  // kernIdentifyCellStartEnd.
  kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer (start indices) failed!");
  kernResetIntBuffer << <fullBlocksPerGridForCells, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer (end indices) failed!");
  // Find the start and end index of each cell's span in the sorted array.
  kernIdentifyCellStartEnd << <fullBlocksPerGridForCells, blockSize >> > (numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // BIG DIFFERENCE vs. the scattered version: gather pos/vel into
  // cell-coherent buffers so the neighbor search reads contiguously.
  kernRearrangeBoidData << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices,
    dev_pos, dev_orderedPos, dev_vel1, dev_orderedVel);
  checkCUDAErrorWithLine("kernRearrangeBoidData failed!");
#if DEBUG
  // NOTE(review): these host arrays are stack-allocated; large NUMBOIDS may
  // overflow the stack. Debug builds only.
  int particleGridIndices[NUMBOIDS];
  int particleArrayIndices[NUMBOIDS];
  glm::vec3 originalpos[NUMBOIDS];
  glm::vec3 orderedpos[NUMBOIDS];
  glm::vec3 originalvel[NUMBOIDS];
  glm::vec3 orderedvel[NUMBOIDS];
  if (printcnt < maxprints){
    cudaMemcpy(particleGridIndices, dev_particleGridIndices, sizeof(int) * NUMBOIDS, cudaMemcpyDeviceToHost);
    cudaMemcpy(particleArrayIndices, dev_particleArrayIndices, sizeof(int) * NUMBOIDS, cudaMemcpyDeviceToHost);
    cudaMemcpy(originalpos, dev_pos, sizeof(glm::vec3) * NUMBOIDS, cudaMemcpyDeviceToHost);
    cudaMemcpy(orderedpos, dev_orderedPos, sizeof(glm::vec3) * NUMBOIDS, cudaMemcpyDeviceToHost);
    cudaMemcpy(originalvel, dev_vel1, sizeof(glm::vec3) * NUMBOIDS, cudaMemcpyDeviceToHost);
    cudaMemcpy(orderedvel, dev_orderedVel, sizeof(glm::vec3) * NUMBOIDS, cudaMemcpyDeviceToHost);
    std::cout << "PARTICLES: " << std::endl;
    for (int i = 0; i < NUMBOIDS; i++) {
      std::cout << " particle index: " << i;
      std::cout << " original boid index: " << particleArrayIndices[i];
      std::cout << " grid index: " << particleGridIndices[i];
      std::cout << " pos in original: " << originalpos[particleArrayIndices[i]].x << originalpos[particleArrayIndices[i]].y << originalpos[particleArrayIndices[i]].z;
      std::cout << " pos in reordered: " << orderedpos[i].x << orderedpos[i].y << orderedpos[i].z;
      std::cout << " vel in original: " << originalvel[particleArrayIndices[i]].x << originalvel[particleArrayIndices[i]].y << originalvel[particleArrayIndices[i]].z;
      std::cout << " vel in reordered: " << orderedvel[i].x << orderedvel[i].y << orderedvel[i].z << std::endl;
    }
  }
#endif
#if PROFILE
  cudaEventRecord(beginEvent, 0);
#endif
  // Perform velocity updates using the coherent neighbor search.
  kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_orderedPos, dev_orderedVel, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
#if PROFILE
  cudaEventRecord(endEvent, 0);
  cudaEventSynchronize(endEvent);
  // FIX: the elapsed time was recorded into randomPosKernelTime while the
  // stale searchAlgoTime was printed; record and print the same variable
  // (matches the scattered-grid step).
  cudaEventElapsedTime(&searchAlgoTime, beginEvent, endEvent);
  std::cout << "search Time: " << searchAlgoTime << std::endl;
#endif
  // Scatter the updated velocities back to their original boid slots (vel1).
  kernReplaceBoidVelData << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices,
    dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernReplaceBoidVelData failed!");
  // Update positions from the original-order velocities.
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // No ping-pong needed: dev_vel1 already holds the updated velocities in
  // the original ordering.
#if DEBUG
  printcnt++;
#endif
}
// Release every device allocation made during simulation setup.
void Boids::endSimulation() {
  // Grid-search bookkeeping buffers (2.1 / 2.3).
  cudaFree(dev_particleArrayIndices);
  cudaFree(dev_particleGridIndices);
  cudaFree(dev_gridCellStartIndices);
  cudaFree(dev_gridCellEndIndices);
  cudaFree(dev_orderedPos);
  cudaFree(dev_orderedVel);
  // Core boid state.
  cudaFree(dev_pos);
  cudaFree(dev_vel1);
  cudaFree(dev_vel2);
#if PROFILE
  cudaEventDestroy(beginEvent);
  cudaEventDestroy(endEvent);
#endif
}
// LOOK-1.2 Sanity check for thrust::sort_by_key on a small host-initialized
// key/value array. Keys deliberately contain duplicates so the unstable sort
// is exercised; results are printed before and after sorting.
void Boids::unitTest() {
  // test unstable sort
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
  // FIX: removed the unused launch-config local `fullBlocksPerGrid`; no
  // kernel is launched here -- Thrust configures its own launches.
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
  // FIX: the host-to-device copies were unchecked while the device-to-host
  // ones were checked; check both directions.
  checkCUDAErrorWithLine("memcpy to device failed!");
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup
  delete[] intKeys;
  delete[] intValues;
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
}
|
b6326115c18bb15c0c2a76a0f86f4f7a6934e22b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudafeat/feature-online-cmvn-cuda.cu
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hipcub/hipcub.hpp>
#include "cudafeat/feature-online-cmvn-cuda.h"
#include "cudamatrix/cu-matrix.h"
#include "cudamatrix/cu-vector.h"
// Component-wise difference of two float2 packets (used on (sum, sum^2)
// prefix-sum pairs to form windowed statistics).
__host__ __device__ inline float2 operator-(const float2 &a, const float2 &b) {
  float2 out;
  out.x = a.x - b.x;
  out.y = a.y - b.y;
  return out;
}
// Component-wise sum of two float2 packets.
__host__ __device__ inline float2 operator+(const float2 &a, const float2 &b) {
  float2 out;
  out.x = a.x + b.x;
  out.y = a.y + b.y;
  return out;
}
// Per-feature inclusive prefix sums of (value, value^2) along the frame axis.
// Launch contract: one block per feature (blockIdx.x == feature index) and
// exactly 1024 threads per block -- the BlockScan below is instantiated for
// 1024 threads.
// Output: stats row r holds, per feature, a float2
// { sum(data[0..r][feat]), sum(data[0..r][feat]^2) } stored through a float2
// cast, so each stats row needs room for 2 floats per feature.
// NOTE(review): the float2 store assumes &stats[r * lds] is 8-byte aligned --
// confirm the stride chosen by the caller's CuMatrix allocation.
#if __CUDA_ARCH__ == 750
__launch_bounds__ (1024, 1)
#else
__launch_bounds__ (1024, 2)
#endif
__global__ void compute_cmvn_stats_kernel(const float *data, int32_t ldd,
int32_t num_frames, int32_t feat_dim,
float *stats, int32_t lds) {
typedef hipcub::BlockScan<float2, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
int32_t feat = blockIdx.x;
// Prefix carried across successive blockDim.x-sized tiles of frames.
float2 running_sum = {0.0f, 0.0f};
// for each frame, keep threads alive for cub
for (int32_t r = 0; r < num_frames; r += blockDim.x) {
int32_t rid = r + threadIdx.x;
float val = 0.0f; // out-of-range threads contribute a neutral zero
if (rid < num_frames) {
// uncoalesced, could transpose data or do some shared memory swizzling...
val = data[rid * ldd + feat];
}
float2 sum = {val, val * val}; // this elements value and value squared
float2 psum; // row prefix sum
float2 total; // total count
BlockScan(temp_storage).InclusiveSum(sum, psum, total);
// offset by running sum
psum = psum + running_sum;
// increase running sum by new total
running_sum = running_sum + total;
// un-coalesced
if (rid < num_frames) {
reinterpret_cast<float2 *>(&stats[rid * lds])[feat] = psum;
}
}
}
// Applies sliding-window CMVN to one frame per block (frame r == blockIdx.x);
// threads stride across the feature columns. Window statistics for frame r
// come from the inclusive prefix sums written by compute_cmvn_stats_kernel:
//   window_sum(r) = stats[r] - stats[r - cmvn_window]   (once r >= window).
// While the window is still filling, the remaining mass is smoothed first
// from speaker stats, then from global stats; in both, index num_cols of the
// first stats row holds the accumulated frame count used for the smoothing
// weight alpha.
// Output: feat_out = (x - mean) / sqrt(var), variance floored at 1e-20;
// var_norm / mean_norm toggle the respective normalizations.
__global__ void apply_cmvn_kernel(
int32_t cmvn_window, bool var_norm, bool mean_norm, const float *feat_in,
int32_t ldi, int32_t num_rows, int32_t num_cols,
const float *__restrict__ stats, int32_t lds,
const float *__restrict__ global_stats, int32_t ldg, int32_t global_frames,
const float *__restrict__ speaker_stats, int32_t ldss,
int32_t speaker_frames, float *feat_out, int32_t ldo) {
int32_t r = blockIdx.x;
for (int c = threadIdx.x; c < num_cols; c += blockDim.x) {
// Inclusive (sum, sum^2) up to and including frame r for column c.
float2 frame_stats =
reinterpret_cast<const float2 __restrict__ *>(&stats[r * lds])[c];
float val = feat_in[r * ldi + c];
float window_length = min(r + 1, cmvn_window);
// we have to subtract row r-cmvn_window stats
if (r >= cmvn_window) {
// window starting row
int32_t o = r - cmvn_window;
// stats at the start row of the window that must be removed
float2 ostats =
reinterpret_cast<const float2 __restrict__ *>(&stats[o * lds])[c];
// remove start of the window stats
frame_stats = frame_stats - ostats;
}
// Smooth stats by speaker frames if necessary
float smooth_frames = cmvn_window - window_length;
if (smooth_frames > 0 && speaker_frames > 0) {
float count_from_speaker = min(smooth_frames, (float)speaker_frames);
float speaker_count = speaker_stats[num_cols];
if (count_from_speaker > 0.0) {
float alpha = count_from_speaker / speaker_count;
frame_stats.x += alpha * speaker_stats[c]; // update mean
frame_stats.y += alpha * speaker_stats[ldss + c]; // update variance
window_length += alpha * speaker_count; // update window length
// recompute smooth frames now that we have speaker stats
smooth_frames = cmvn_window - window_length;
}
}
// Smooth stats by global frames if necessary
if (smooth_frames > 0 && global_frames > 0) {
float count_from_global = min(smooth_frames, (float)global_frames);
float global_count = global_stats[num_cols];
if (count_from_global > 0.0) {
float alpha = count_from_global / global_count;
frame_stats.x += alpha * global_stats[c]; // update mean
frame_stats.y += alpha * global_stats[ldg + c]; // update variance
window_length += alpha * global_count; // update window length
}
}
float mean = frame_stats.x / window_length;
float var = frame_stats.y / window_length - mean * mean;
float floor = 1e-20;
if (var < floor) // avoid dividing by zero
var = floor;
if (!var_norm) {
// skip variance normalization
var = 1.0f;
}
// NOTE: mean_norm == false is currently unsupported -- traps in debug builds.
if (!mean_norm) {
assert(false);
// skip mean normalization
mean = 0.0f;
}
// shift by mean and scale by variance
feat_out[r * ldo + c] = (val - mean) / sqrtf(var);
}
}
namespace kaldi {
// Computes online-CMVN-normalized features entirely on the GPU:
// 1) compute_cmvn_stats_kernel builds per-feature inclusive prefix sums of
//    (x, x^2) over the frames (one block per feature, 1024 threads each);
// 2) apply_cmvn_kernel normalizes each frame against a sliding window of
//    opts_.cmn_window frames, smoothing with speaker and then global stats
//    while the window is still filling.
// feats_out is resized to match feats_in; `stats` is scratch holding two
// floats (sum, sum^2) per feature column per frame.
void CudaOnlineCmvn::ComputeFeatures(const CuMatrixBase<BaseFloat> &feats_in,
CuMatrix<BaseFloat> *feats_out) {
int32_t num_frames = feats_in.NumRows();
int32_t feat_dim = feats_in.NumCols();
feats_out->Resize(num_frames, feat_dim, kUndefined);
CuMatrix<float> stats(num_frames, feat_dim * 2, kUndefined);
int threads = 1024; // must match the BlockScan<float2, 1024> instantiation
int blocks = feat_dim; // one block per feature dimension
// compute windowed sum/sum2 prefix sum along column of feats
hipLaunchKernelGGL(( compute_cmvn_stats_kernel), dim3(blocks), dim3(threads), 0, 0,
feats_in.Data(), feats_in.Stride(), num_frames, feat_dim, stats.Data(),
stats.Stride());
CU_SAFE_CALL(hipGetLastError());
threads = (feat_dim + 31) / 32 * 32; // round up to 32 threads
if (threads > 1024) threads = 1024;
const CuMatrix<float> &gstats = cmvn_state_.global_cmvn_stats;
const CuMatrix<float> &sstats = cmvn_state_.speaker_cmvn_stats;
int global_frames = opts_.global_frames;
int speaker_frames = opts_.speaker_frames;
// Empty stats matrices mean "no smoothing from this source".
if (gstats.NumRows() == 0) global_frames = 0;
if (sstats.NumRows() == 0) speaker_frames = 0;
// apply cmvn
hipLaunchKernelGGL(( apply_cmvn_kernel), dim3(num_frames), dim3(threads), 0, 0,
opts_.cmn_window, opts_.normalize_variance, opts_.normalize_mean,
feats_in.Data(), feats_in.Stride(), num_frames, feat_dim, stats.Data(),
stats.Stride(), gstats.Data(), gstats.Stride(), global_frames,
sstats.Data(), sstats.Stride(), speaker_frames, feats_out->Data(),
feats_out->Stride());
CU_SAFE_CALL(hipGetLastError());
}
}
| b6326115c18bb15c0c2a76a0f86f4f7a6934e22b.cu | // cudafeat/feature-online-cmvn-cuda.cu
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cub/cub.cuh>
#include "cudafeat/feature-online-cmvn-cuda.h"
#include "cudamatrix/cu-matrix.h"
#include "cudamatrix/cu-vector.h"
// Component-wise difference of two float2 packets (used on (sum, sum^2)
// prefix-sum pairs to form windowed statistics).
__host__ __device__ inline float2 operator-(const float2 &a, const float2 &b) {
  float2 out;
  out.x = a.x - b.x;
  out.y = a.y - b.y;
  return out;
}
// Component-wise sum of two float2 packets.
__host__ __device__ inline float2 operator+(const float2 &a, const float2 &b) {
  float2 out;
  out.x = a.x + b.x;
  out.y = a.y + b.y;
  return out;
}
// Per-feature inclusive prefix sums of (value, value^2) along the frame axis.
// Launch contract: one block per feature (blockIdx.x == feature index) and
// exactly 1024 threads per block -- the BlockScan below is instantiated for
// 1024 threads.
// Output: stats row r holds, per feature, a float2
// { sum(data[0..r][feat]), sum(data[0..r][feat]^2) } stored through a float2
// cast, so each stats row needs room for 2 floats per feature.
// NOTE(review): the float2 store assumes &stats[r * lds] is 8-byte aligned --
// confirm the stride chosen by the caller's CuMatrix allocation.
#if __CUDA_ARCH__ == 750
__launch_bounds__ (1024, 1)
#else
__launch_bounds__ (1024, 2)
#endif
__global__ void compute_cmvn_stats_kernel(const float *data, int32_t ldd,
int32_t num_frames, int32_t feat_dim,
float *stats, int32_t lds) {
typedef cub::BlockScan<float2, 1024> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
int32_t feat = blockIdx.x;
// Prefix carried across successive blockDim.x-sized tiles of frames.
float2 running_sum = {0.0f, 0.0f};
// for each frame, keep threads alive for cub
for (int32_t r = 0; r < num_frames; r += blockDim.x) {
int32_t rid = r + threadIdx.x;
float val = 0.0f; // out-of-range threads contribute a neutral zero
if (rid < num_frames) {
// uncoalesced, could transpose data or do some shared memory swizzling...
val = data[rid * ldd + feat];
}
float2 sum = {val, val * val}; // this elements value and value squared
float2 psum; // row prefix sum
float2 total; // total count
BlockScan(temp_storage).InclusiveSum(sum, psum, total);
// offset by running sum
psum = psum + running_sum;
// increase running sum by new total
running_sum = running_sum + total;
// un-coalesced
if (rid < num_frames) {
reinterpret_cast<float2 *>(&stats[rid * lds])[feat] = psum;
}
}
}
// Applies sliding-window CMVN to one frame per block (frame r == blockIdx.x);
// threads stride across the feature columns. Window statistics for frame r
// come from the inclusive prefix sums written by compute_cmvn_stats_kernel:
//   window_sum(r) = stats[r] - stats[r - cmvn_window]   (once r >= window).
// While the window is still filling, the remaining mass is smoothed first
// from speaker stats, then from global stats; in both, index num_cols of the
// first stats row holds the accumulated frame count used for the smoothing
// weight alpha.
// Output: feat_out = (x - mean) / sqrt(var), variance floored at 1e-20;
// var_norm / mean_norm toggle the respective normalizations.
__global__ void apply_cmvn_kernel(
int32_t cmvn_window, bool var_norm, bool mean_norm, const float *feat_in,
int32_t ldi, int32_t num_rows, int32_t num_cols,
const float *__restrict__ stats, int32_t lds,
const float *__restrict__ global_stats, int32_t ldg, int32_t global_frames,
const float *__restrict__ speaker_stats, int32_t ldss,
int32_t speaker_frames, float *feat_out, int32_t ldo) {
int32_t r = blockIdx.x;
for (int c = threadIdx.x; c < num_cols; c += blockDim.x) {
// Inclusive (sum, sum^2) up to and including frame r for column c.
float2 frame_stats =
reinterpret_cast<const float2 __restrict__ *>(&stats[r * lds])[c];
float val = feat_in[r * ldi + c];
float window_length = min(r + 1, cmvn_window);
// we have to subtract row r-cmvn_window stats
if (r >= cmvn_window) {
// window starting row
int32_t o = r - cmvn_window;
// stats at the start row of the window that must be removed
float2 ostats =
reinterpret_cast<const float2 __restrict__ *>(&stats[o * lds])[c];
// remove start of the window stats
frame_stats = frame_stats - ostats;
}
// Smooth stats by speaker frames if necessary
float smooth_frames = cmvn_window - window_length;
if (smooth_frames > 0 && speaker_frames > 0) {
float count_from_speaker = min(smooth_frames, (float)speaker_frames);
float speaker_count = speaker_stats[num_cols];
if (count_from_speaker > 0.0) {
float alpha = count_from_speaker / speaker_count;
frame_stats.x += alpha * speaker_stats[c]; // update mean
frame_stats.y += alpha * speaker_stats[ldss + c]; // update variance
window_length += alpha * speaker_count; // update window length
// recompute smooth frames now that we have speaker stats
smooth_frames = cmvn_window - window_length;
}
}
// Smooth stats by global frames if necessary
if (smooth_frames > 0 && global_frames > 0) {
float count_from_global = min(smooth_frames, (float)global_frames);
float global_count = global_stats[num_cols];
if (count_from_global > 0.0) {
float alpha = count_from_global / global_count;
frame_stats.x += alpha * global_stats[c]; // update mean
frame_stats.y += alpha * global_stats[ldg + c]; // update variance
window_length += alpha * global_count; // update window length
}
}
float mean = frame_stats.x / window_length;
float var = frame_stats.y / window_length - mean * mean;
float floor = 1e-20;
if (var < floor) // avoid dividing by zero
var = floor;
if (!var_norm) {
// skip variance normalization
var = 1.0f;
}
// NOTE: mean_norm == false is currently unsupported -- traps in debug builds.
if (!mean_norm) {
assert(false);
// skip mean normalization
mean = 0.0f;
}
// shift by mean and scale by variance
feat_out[r * ldo + c] = (val - mean) / sqrtf(var);
}
}
namespace kaldi {
// Computes online-CMVN-normalized features entirely on the GPU:
// 1) compute_cmvn_stats_kernel builds per-feature inclusive prefix sums of
//    (x, x^2) over the frames (one block per feature, 1024 threads each);
// 2) apply_cmvn_kernel normalizes each frame against a sliding window of
//    opts_.cmn_window frames, smoothing with speaker and then global stats
//    while the window is still filling.
// feats_out is resized to match feats_in; `stats` is scratch holding two
// floats (sum, sum^2) per feature column per frame.
void CudaOnlineCmvn::ComputeFeatures(const CuMatrixBase<BaseFloat> &feats_in,
CuMatrix<BaseFloat> *feats_out) {
int32_t num_frames = feats_in.NumRows();
int32_t feat_dim = feats_in.NumCols();
feats_out->Resize(num_frames, feat_dim, kUndefined);
CuMatrix<float> stats(num_frames, feat_dim * 2, kUndefined);
int threads = 1024; // must match the BlockScan<float2, 1024> instantiation
int blocks = feat_dim; // one block per feature dimension
// compute windowed sum/sum2 prefix sum along column of feats
compute_cmvn_stats_kernel<<<blocks, threads>>>(
feats_in.Data(), feats_in.Stride(), num_frames, feat_dim, stats.Data(),
stats.Stride());
CU_SAFE_CALL(cudaGetLastError());
threads = (feat_dim + 31) / 32 * 32; // round up to 32 threads
if (threads > 1024) threads = 1024;
const CuMatrix<float> &gstats = cmvn_state_.global_cmvn_stats;
const CuMatrix<float> &sstats = cmvn_state_.speaker_cmvn_stats;
int global_frames = opts_.global_frames;
int speaker_frames = opts_.speaker_frames;
// Empty stats matrices mean "no smoothing from this source".
if (gstats.NumRows() == 0) global_frames = 0;
if (sstats.NumRows() == 0) speaker_frames = 0;
// apply cmvn
apply_cmvn_kernel<<<num_frames, threads>>>(
opts_.cmn_window, opts_.normalize_variance, opts_.normalize_mean,
feats_in.Data(), feats_in.Stride(), num_frames, feat_dim, stats.Data(),
stats.Stride(), gstats.Data(), gstats.Stride(), global_frames,
sstats.Data(), sstats.Stride(), speaker_frames, feats_out->Data(),
feats_out->Stride());
CU_SAFE_CALL(cudaGetLastError());
}
}
|
053e7aef689e2ec3171f4254eb9482e660819e06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if defined(COSMOLOGY) && defined(PARTICLES_GPU)
#include "cosmology_functions_gpu.h"
// __device__ Real Get_Hubble_Parameter_dev( Real a, Real H0, Real Omega_M, Real Omega_L, Real Omega_K ){
// Real a2 = a * a;
// Real a3 = a2 * a;
// Real factor = ( Omega_M/a3 + Omega_K/a2 + Omega_L );
// return H0 * sqrt(factor);
//
// }
// Scales the conserved hydro fields in place by the supplied unit-conversion
// factors. One thread per cell on a 3D grid; out-of-range threads exit early.
void __global__ Change_GAS_Frame_System_kernel( Real dens_factor, Real momentum_factor, Real energy_factor,
int nx, int ny, int nz, Real *density_d, Real *momentum_x_d, Real *momentum_y_d, Real *momentum_z_d,
Real *Energy_d, Real *GasEnergy_d ){
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int j = blockIdx.y * blockDim.y + threadIdx.y;
  const int k = blockIdx.z * blockDim.z + threadIdx.z;
  if ( i >= nx || j >= ny || k >= nz ) return;
  // Flattened cell index, x fastest.
  const int id = i + nx * ( j + ny * k );
  density_d[id]    *= dens_factor;
  momentum_x_d[id] *= momentum_factor;
  momentum_y_d[id] *= momentum_factor;
  momentum_z_d[id] *= momentum_factor;
  Energy_d[id]     *= energy_factor;
  #ifdef DE
  GasEnergy_d[id]  *= energy_factor;
  #endif
  //NOTE If CHEMISTRY_GPU I need to add the conversion for the chemical species here
}
// Rescales the conserved gas fields between unit frames on the GPU.
// forward == true : divide out the cosmological unit scales (momentum and
//                   energy also pick up factors of the scale factor
//                   current_a); forward == false applies the inverse.
// NOTE(review): the exact frame naming (physical vs. comoving/code units) is
// inferred from the factors -- confirm against the cosmology unit
// conventions used elsewhere in the code base.
void Grid3D::Change_GAS_Frame_System_GPU( bool forward ){
Real dens_factor, momentum_factor, energy_factor;
if ( forward ){
dens_factor = 1 / Cosmo.rho_0_gas;
momentum_factor = 1 / Cosmo.rho_0_gas / Cosmo.v_0_gas * Cosmo.current_a;
energy_factor = 1 / Cosmo.rho_0_gas / Cosmo.v_0_gas / Cosmo.v_0_gas * Cosmo.current_a * Cosmo.current_a;
}
else{
dens_factor = Cosmo.rho_0_gas;
momentum_factor = Cosmo.rho_0_gas * Cosmo.v_0_gas / Cosmo.current_a;
energy_factor = Cosmo.rho_0_gas * Cosmo.v_0_gas * Cosmo.v_0_gas / Cosmo.current_a / Cosmo.current_a;
}
int nx, ny, nz;
nx = H.nx;
ny = H.ny;
nz = H.nz;
// set values for GPU kernels
int tpb_x = TPBX_COSMO;
int tpb_y = TPBY_COSMO;
int tpb_z = TPBZ_COSMO;
// Ceiling division so the grid covers every cell.
int ngrid_x = (nx - 1) / tpb_x + 1;
int ngrid_y = (ny - 1) / tpb_y + 1;
int ngrid_z = (nz - 1) / tpb_z + 1;
// number of blocks per 1D grid
dim3 dim3dGrid(ngrid_x, ngrid_y, ngrid_z);
// number of threads per 1D block
dim3 dim3dBlock(tpb_x, tpb_y, tpb_z);
Real *GasEnergy_d;
#ifdef DE
GasEnergy_d = C.d_GasEnergy;
#else
GasEnergy_d = NULL;
#endif
hipLaunchKernelGGL(Change_GAS_Frame_System_kernel, dim3dGrid, dim3dBlock, 0, 0, dens_factor, momentum_factor, energy_factor, nx, ny, nz,
C.d_density, C.d_momentum_x, C.d_momentum_y, C.d_momentum_z, C.d_Energy, GasEnergy_d );
}
#endif //COSMOLOGY | 053e7aef689e2ec3171f4254eb9482e660819e06.cu | #if defined(COSMOLOGY) && defined(PARTICLES_GPU)
#include "cosmology_functions_gpu.h"
// __device__ Real Get_Hubble_Parameter_dev( Real a, Real H0, Real Omega_M, Real Omega_L, Real Omega_K ){
// Real a2 = a * a;
// Real a3 = a2 * a;
// Real factor = ( Omega_M/a3 + Omega_K/a2 + Omega_L );
// return H0 * sqrt(factor);
//
// }
// Scales the conserved hydro fields in place by the supplied unit-conversion
// factors. One thread per cell on a 3D grid; out-of-range threads exit early.
void __global__ Change_GAS_Frame_System_kernel( Real dens_factor, Real momentum_factor, Real energy_factor,
int nx, int ny, int nz, Real *density_d, Real *momentum_x_d, Real *momentum_y_d, Real *momentum_z_d,
Real *Energy_d, Real *GasEnergy_d ){
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int j = blockIdx.y * blockDim.y + threadIdx.y;
  const int k = blockIdx.z * blockDim.z + threadIdx.z;
  if ( i >= nx || j >= ny || k >= nz ) return;
  // Flattened cell index, x fastest.
  const int id = i + nx * ( j + ny * k );
  density_d[id]    *= dens_factor;
  momentum_x_d[id] *= momentum_factor;
  momentum_y_d[id] *= momentum_factor;
  momentum_z_d[id] *= momentum_factor;
  Energy_d[id]     *= energy_factor;
  #ifdef DE
  GasEnergy_d[id]  *= energy_factor;
  #endif
  //NOTE If CHEMISTRY_GPU I need to add the conversion for the chemical species here
}
// Rescales the conserved gas fields between unit frames on the GPU.
// forward == true : divide out the cosmological unit scales (momentum and
//                   energy also pick up factors of the scale factor
//                   current_a); forward == false applies the inverse.
// NOTE(review): the exact frame naming (physical vs. comoving/code units) is
// inferred from the factors -- confirm against the cosmology unit
// conventions used elsewhere in the code base.
void Grid3D::Change_GAS_Frame_System_GPU( bool forward ){
Real dens_factor, momentum_factor, energy_factor;
if ( forward ){
dens_factor = 1 / Cosmo.rho_0_gas;
momentum_factor = 1 / Cosmo.rho_0_gas / Cosmo.v_0_gas * Cosmo.current_a;
energy_factor = 1 / Cosmo.rho_0_gas / Cosmo.v_0_gas / Cosmo.v_0_gas * Cosmo.current_a * Cosmo.current_a;
}
else{
dens_factor = Cosmo.rho_0_gas;
momentum_factor = Cosmo.rho_0_gas * Cosmo.v_0_gas / Cosmo.current_a;
energy_factor = Cosmo.rho_0_gas * Cosmo.v_0_gas * Cosmo.v_0_gas / Cosmo.current_a / Cosmo.current_a;
}
int nx, ny, nz;
nx = H.nx;
ny = H.ny;
nz = H.nz;
// set values for GPU kernels
int tpb_x = TPBX_COSMO;
int tpb_y = TPBY_COSMO;
int tpb_z = TPBZ_COSMO;
// Ceiling division so the grid covers every cell.
int ngrid_x = (nx - 1) / tpb_x + 1;
int ngrid_y = (ny - 1) / tpb_y + 1;
int ngrid_z = (nz - 1) / tpb_z + 1;
// number of blocks per 1D grid
dim3 dim3dGrid(ngrid_x, ngrid_y, ngrid_z);
// number of threads per 1D block
dim3 dim3dBlock(tpb_x, tpb_y, tpb_z);
Real *GasEnergy_d;
#ifdef DE
GasEnergy_d = C.d_GasEnergy;
#else
GasEnergy_d = NULL;
#endif
hipLaunchKernelGGL(Change_GAS_Frame_System_kernel, dim3dGrid, dim3dBlock, 0, 0, dens_factor, momentum_factor, energy_factor, nx, ny, nz,
C.d_density, C.d_momentum_x, C.d_momentum_y, C.d_momentum_z, C.d_Energy, GasEnergy_d );
}
#endif //COSMOLOGY |
de14cd35cb3db5e870ae57c4c192ba4a103a84c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ========================================================
Fonctions propres au GPU
======================================================== */
// Reverses A_in (length n) into A_out, one element per thread.
// FIX: added an i < n bounds guard so the kernel is correct for any launch
// configuration, not only when n is an exact multiple of the block size.
__global__ void _reverse_array(int *A_in, int n, int *A_out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        A_out[i] = A_in[n - 1 - i];
}
/*
nb_blocks = N/BLOCK_SIZE;
nb_threads_per_block = BLOCK_SIZE
nb_threads = N;
UN TABLEAU BLOCK_SIZE par block
N/BLOCK_SIZE blocks
*/
// Reverses A_in (length n) into A_out, staging each block's elements through
// shared memory (one BLOCK_SIZE tile per block).
// FIX: added i < n bounds guards around the load and the store; the
// __syncthreads() barrier stays outside the guarded branches so every thread
// in the block reaches it (a barrier inside divergent flow is undefined).
__global__ void _reverse_array2(int *A_in, int n, int *A_out)
{
    __shared__ int s[BLOCK_SIZE]; // per-block staging buffer
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        s[threadIdx.x] = A_in[n - 1 - i];
    __syncthreads();
    if (i < n)
        A_out[i] = s[threadIdx.x];
}
| de14cd35cb3db5e870ae57c4c192ba4a103a84c0.cu | /* ========================================================
Fonctions propres au GPU
======================================================== */
// Reverses A_in (length n) into A_out, one element per thread.
// FIX: added an i < n bounds guard so the kernel is correct for any launch
// configuration, not only when n is an exact multiple of the block size.
__global__ void _reverse_array(int *A_in, int n, int *A_out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        A_out[i] = A_in[n - 1 - i];
}
/*
nb_blocks = N/BLOCK_SIZE;
nb_threads_per_block = BLOCK_SIZE
nb_threads = N;
UN TABLEAU BLOCK_SIZE par block
N/BLOCK_SIZE blocks
*/
// Reverses A_in (length n) into A_out, staging each block's elements through
// shared memory (one BLOCK_SIZE tile per block).
// FIX: added i < n bounds guards around the load and the store; the
// __syncthreads() barrier stays outside the guarded branches so every thread
// in the block reaches it (a barrier inside divergent flow is undefined).
__global__ void _reverse_array2(int *A_in, int n, int *A_out)
{
    __shared__ int s[BLOCK_SIZE]; // per-block staging buffer
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        s[threadIdx.x] = A_in[n - 1 - i];
    __syncthreads();
    if (i < n)
        A_out[i] = s[threadIdx.x];
}
|
c1d95686956394d392edf74e44575e4b92b537fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
//#include "helper_functions.h"
//#include "helper_cuda.h"
//#include "exception.h"
#include <helper_functions.h>
#include <helper_cuda.h>
#include <exception.h>
#include "gputimer.h"
using namespace std;
cv::Mat imageRGBA;
cv::Mat imageGrey;
uchar4 *d_rgbaImage__;
unsigned char *d_greyImage__;
// Dimensions of the most recently loaded RGBA image (populated by preProcess).
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
// Loads `filename` with OpenCV, converts it to RGBA, allocates matching host
// and device buffers, uploads the RGBA pixels, and returns host + device
// pointers through the out-parameters. Also stashes the device pointers in
// the d_rgbaImage__ / d_greyImage__ globals so postProcess can free them.
// Exits the process on failure (unreadable file, non-contiguous cv::Mat).
void preProcess(uchar4 **inputImage, unsigned char **greyImage,
uchar4 **d_rgbaImage, unsigned char **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
checkCudaErrors(hipFree(0));
cv::Mat image;
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
// OpenCV loads BGR; convert to RGBA so each pixel maps onto a uchar4.
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = imageGrey.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
checkCudaErrors(hipMalloc(d_rgbaImage, sizeof(uchar4) * numPixels));
checkCudaErrors(hipMalloc(d_greyImage, sizeof(unsigned char) * numPixels));
checkCudaErrors(hipMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char))); //make sure no memory is left laying around
//copy input array to the GPU
checkCudaErrors(hipMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice));
// Remember device pointers for postProcess cleanup.
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
// Copies the greyscale result back from the device, writes it to
// `output_file`, and frees the device buffers saved by preProcess.
void postProcess(const std::string& output_file) {
const int numPixels = numRows() * numCols();
//copy the output back to the host
checkCudaErrors(hipMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, sizeof(unsigned char) * numPixels, hipMemcpyDeviceToHost));
//output the image
cv::imwrite(output_file.c_str(), imageGrey);
//cleanup
hipFree(d_rgbaImage__);
hipFree(d_greyImage__);
}
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int idx = threadIdx.x;
int blk = blockIdx.x;
uchar4 rgba = rgbaImage[blk*numCols+idx];
float I = .299f *rgba.x + .587f *rgba.y + .114f *rgba.z;
greyImage[blk*numCols+idx] = I;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize((int) numCols, 1, 1); //TODO
const dim3 gridSize((int) numRows, 1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
int main(int argc, char **argv) {
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);
GpuTimer timer;
timer.Start();
//call the students' code
your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
timer.Stop();
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
printf("\n");
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the grey image
postProcess(output_file);
return 0;
}
| c1d95686956394d392edf74e44575e4b92b537fb.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
//#include "helper_functions.h"
//#include "helper_cuda.h"
//#include "exception.h"
#include <helper_functions.h>
#include <helper_cuda.h>
#include <exception.h>
#include "gputimer.h"
using namespace std;
cv::Mat imageRGBA;
cv::Mat imageGrey;
uchar4 *d_rgbaImage__;
unsigned char *d_greyImage__;
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
void preProcess(uchar4 **inputImage, unsigned char **greyImage,
uchar4 **d_rgbaImage, unsigned char **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
checkCudaErrors(cudaFree(0));
cv::Mat image;
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = imageGrey.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
checkCudaErrors(cudaMalloc(d_rgbaImage, sizeof(uchar4) * numPixels));
checkCudaErrors(cudaMalloc(d_greyImage, sizeof(unsigned char) * numPixels));
checkCudaErrors(cudaMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char))); //make sure no memory is left laying around
//copy input array to the GPU
checkCudaErrors(cudaMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice));
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
void postProcess(const std::string& output_file) {
const int numPixels = numRows() * numCols();
//copy the output back to the host
checkCudaErrors(cudaMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, sizeof(unsigned char) * numPixels, cudaMemcpyDeviceToHost));
//output the image
cv::imwrite(output_file.c_str(), imageGrey);
//cleanup
cudaFree(d_rgbaImage__);
cudaFree(d_greyImage__);
}
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int idx = threadIdx.x;
int blk = blockIdx.x;
uchar4 rgba = rgbaImage[blk*numCols+idx];
float I = .299f *rgba.x + .587f *rgba.y + .114f *rgba.z;
greyImage[blk*numCols+idx] = I;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize((int) numCols, 1, 1); //TODO
const dim3 gridSize((int) numRows, 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
int main(int argc, char **argv) {
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);
GpuTimer timer;
timer.Start();
//call the students' code
your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
timer.Stop();
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
printf("\n");
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the grey image
postProcess(output_file);
return 0;
}
|
faa8360ba1c249ae336d48f874833c3a37472c4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 30.05.2019
//
#include <array/NDArrayFactory.h>
#include <array/ResultSet.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/one_hot.h>
#include <numeric>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - indices, z - output
template <typename X, typename Z>
SD_KERNEL static void onehotCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const sd::LongType axis, const sd::LongType depth,
const Z on, const Z off) {
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int xRank, zRank;
__shared__ sd::LongType zLen, totalThreads, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coord = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coord);
const auto zOffset = shape::getOffset(zShapeInfo, coord);
const auto depthCoord = coord[axis];
for (sd::LongType j = axis; j < zRank - 1; ++j) coord[j] = coord[j + 1];
const auto xOffset = shape::getOffset(xShapeInfo, coord);
const sd::LongType idx = x[xOffset];
z[zOffset] = depthCoord == idx ? on : off;
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Y>
static void onehotCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const sd::LongType axis, const sd::LongType depth,
const double on, const double off) {
hipLaunchKernelGGL(( onehotCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, axis, depth,
static_cast<Y>(on), static_cast<Y>(off));
}
///////////////////////////////////////////////////////////////////
void onehot(const sd::LaunchContext *context, const NDArray *indices, NDArray *output, const sd::LongType axis,
const sd::LongType depth, const double on, const double off) {
const auto xType = indices->dataType();
const auto zType = output->dataType();
dim3 oneHotLaunch = oneHotDims(output->lengthOf(),output->rankOf(), sizeof(decltype(*output->shapeInfo())));
PointersManager manager(context, "onehot");
NDArray::prepareSpecialUse({output}, {indices});
BUILD_DOUBLE_SELECTOR(
xType, zType, onehotCudaLauncher,
(oneHotLaunch.y, oneHotLaunch.x, oneHotLaunch.z, context->getCudaStream(), indices->specialBuffer(),
indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), axis, depth, on, off),
SD_COMMON_TYPES, SD_COMMON_TYPES);
NDArray::registerSpecialUse({output}, {indices});
manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
| faa8360ba1c249ae336d48f874833c3a37472c4c.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 30.05.2019
//
#include <array/NDArrayFactory.h>
#include <array/ResultSet.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/one_hot.h>
#include <numeric>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - indices, z - output
template <typename X, typename Z>
SD_KERNEL static void onehotCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const sd::LongType axis, const sd::LongType depth,
const Z on, const Z off) {
const auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
__shared__ int xRank, zRank;
__shared__ sd::LongType zLen, totalThreads, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<sd::LongType *>(shmem);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coord = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coord);
const auto zOffset = shape::getOffset(zShapeInfo, coord);
const auto depthCoord = coord[axis];
for (sd::LongType j = axis; j < zRank - 1; ++j) coord[j] = coord[j + 1];
const auto xOffset = shape::getOffset(xShapeInfo, coord);
const sd::LongType idx = x[xOffset];
z[zOffset] = depthCoord == idx ? on : off;
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Y>
static void onehotCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const void *vx, const sd::LongType *xShapeInfo, void *vz,
const sd::LongType *zShapeInfo, const sd::LongType axis, const sd::LongType depth,
const double on, const double off) {
onehotCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, axis, depth,
static_cast<Y>(on), static_cast<Y>(off));
}
///////////////////////////////////////////////////////////////////
void onehot(const sd::LaunchContext *context, const NDArray *indices, NDArray *output, const sd::LongType axis,
const sd::LongType depth, const double on, const double off) {
const auto xType = indices->dataType();
const auto zType = output->dataType();
dim3 oneHotLaunch = oneHotDims(output->lengthOf(),output->rankOf(), sizeof(decltype(*output->shapeInfo())));
PointersManager manager(context, "onehot");
NDArray::prepareSpecialUse({output}, {indices});
BUILD_DOUBLE_SELECTOR(
xType, zType, onehotCudaLauncher,
(oneHotLaunch.y, oneHotLaunch.x, oneHotLaunch.z, context->getCudaStream(), indices->specialBuffer(),
indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), axis, depth, on, off),
SD_COMMON_TYPES, SD_COMMON_TYPES);
NDArray::registerSpecialUse({output}, {indices});
manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
057643fd43193680be5da628a5c0808948611a5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
hipMalloc( &d_a, SIZE*sizeof(float));
hipMalloc( &d_b, SIZE*sizeof(float));
hipMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
hipMemcpy( d_a, a, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_b, b, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_c, c, SIZE*sizeof(float), hipMemcpyHostToDevice );
start = clock();
hipLaunchKernelGGL(( VectorAdd), dim3(1), dim3(SIZE) , 0, 0, d_a, d_b, d_c, SIZE);
end = clock();
hipMemcpy( c, d_c, SIZE*sizeof(float), hipMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
}
| 057643fd43193680be5da628a5c0808948611a5e.cu | #include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = threadIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
cudaMalloc( &d_a, SIZE*sizeof(float));
cudaMalloc( &d_b, SIZE*sizeof(float));
cudaMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
cudaMemcpy( d_a, a, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_c, c, SIZE*sizeof(float), cudaMemcpyHostToDevice );
start = clock();
VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
end = clock();
cudaMemcpy( c, d_c, SIZE*sizeof(float), cudaMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
}
|
ce78bd3c701a7d594207f5a95e8b4a88f15c233a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus;
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| ce78bd3c701a7d594207f5a95e8b4a88f15c233a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus;
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
9554134efa2c7198bc449c8a75f2a34fbba10832.hip | // !!! This is a file automatically generated by hipify!!!
/*!
\file Matrix.cu
\brief Implementation of member functions of classes Dense_Matrix, CSR_Matrix and COO_Matrix
*/
#include <cassert>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Matrix.h"
#include "kernels.h"
#include "header.h"
//----------------------------------------------------------------------------------------------------------------------
/* Member functions for Dense matrix class*/
//! Move assignment operator for Dense Matrix class
/*!
It moves the CPU and GPU resources/state of the temporary object(input) into the current class object.
\param[in,out] mat rvalue reference which binds to dense matrix class temporary object
\return reference(lvalue) to the current object
*/
Dense_Matrix& Dense_Matrix::operator= (Dense_Matrix&& mat)
{
assert(GetRows() == mat.GetRows());
assert(GetCols() == mat.GetCols());
assert(GetLda() == mat.GetLda());
assert(GetOrder() == mat.GetOrder());
cpu_exists = mat.cpu_exists;
cpu_values = mat.cpu_values;
mat.cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
mat.cpu_values = nullptr;
gpu_exists = mat.gpu_exists;
gpu_values = mat.gpu_values;
mat.gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
mat.gpu_values = nullptr;
return *this;
}
//! Copy assignment operator for dense matrix class
/*!
This copies the resources/state of the input object(lvalue reference) to the current class object.
\param[in] mat lvalue reference which binds to an lvalue -dense matrix class object
\return reference(lvalue) to the current object
*/
Dense_Matrix& Dense_Matrix::operator= (const Dense_Matrix& mat)
{
assert(GetRows() == mat.GetRows());
assert(GetCols() == mat.GetCols());
assert(GetLda() == mat.GetLda());
assert(GetOrder() == mat.GetOrder());
if (mat.ExistsCPU() == true)
{
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
Copy_array_cpu_to_cpu(mat.GetCPUValues(), GetCPUValues(), GetLda() * GetCols());
}
else
Deallocate_Memory(LOCATION::CPU);
if (mat.ExistsGPU() == true)
{
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
Copy_array_gpu_to_gpu(mat.GetGPUValues(), GetGPUValues(), GetLda() * GetCols());
}
else
Deallocate_Memory(LOCATION::GPU);
return *this;
}
//! Move constructor for dense matrix class
/*!
Forms a dense matrix object by moving CPU and GPU resources/state of a temporary object into it
\param[in,out] mat rvalue reference which binds to dense matrix class temporary object
*/
Dense_Matrix::Dense_Matrix(Dense_Matrix&& mat)
: rows{ mat.rows }, cols{ mat.cols }, lda{ mat.lda }, order{ mat.order }
{
cpu_exists = mat.cpu_exists;
cpu_values = mat.cpu_values;
mat.cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
mat.cpu_values = nullptr;
gpu_exists = mat.gpu_exists;
gpu_values = mat.gpu_values;
mat.gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
mat.gpu_values = nullptr;
}
//! Copy constructor for dense matrix class
/*!
Forms a dense matrix object by copying CPU and GPU resources/state of the input(lvalue reference) into it
\param[in] mat lvalue reference which binds to an lvalue -dense matrix class object
*/
Dense_Matrix::Dense_Matrix(const Dense_Matrix& mat)
: rows{ mat.rows }, cols{ mat.cols }, lda{ mat.lda }, order{ mat.order }
{
if (mat.ExistsCPU() == true)
{
Allocate_Memory(LOCATION::CPU);
Copy_array_cpu_to_cpu(mat.cpu_values, this->cpu_values, lda * cols);
}
if (mat.ExistsGPU() == true)
{
Allocate_Memory(LOCATION::GPU);
Copy_array_gpu_to_gpu(mat.gpu_values, this->gpu_values, lda * cols);
}
}
//! Allocates memory on the specified location to store dense matirx's internal ararys
/*!
Allocates memory on the specified location based on the leading dimension and the number of colums of the dense matrix object
\param[in] loc enum type which indicates the location -either GPU or CPU, where the dense matrix values are to be stored
*/
void Dense_Matrix::Allocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == false)
{
cpu_values = new DoubleComplex[GetLda() * GetCols()];
cpu_exists = CPU_EXISTENCE::EXISTENT;
}
else if (loc == LOCATION::GPU && ExistsGPU() == false)
{
hipMalloc((void**)&gpu_values, GetLda() * GetCols() * sizeof(DoubleComplex));
gpu_exists = GPU_EXISTENCE::EXISTENT;
}
}
//! A parameterized constructor for dense matrix class
/*!
\param[in] num_rows number of rows in dense matrix object being formed
\param[in] num_cols number of columns in dense matrix object being formed
\param[in] lda_mat leading dimension of the dense matrix object being formed.(Usually the number of rows is rounded up to a certain value to give lda.)
\param[in] order_mat storage order of the dense matrix object being formed
\param[in] cpu_exists enum type variable which indicates if memory is to be allocated on CPU for the object's internals(the values array) being formed
\param[in] gpu_exists enum type variable which indicates if memory is to be allocated on GPU for the object's internals(the values array) being formed
*/
Dense_Matrix::Dense_Matrix(const int num_rows, const int num_cols, const int lda_mat, const ORDER order_mat, const CPU_EXISTENCE cpu_exists, const GPU_EXISTENCE gpu_exists)
: rows{ num_rows }, cols{ num_cols }, lda{ lda_mat }, order{ order_mat }
{
assert(lda >= rows);
if (cpu_exists == CPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::CPU);
if (gpu_exists == GPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::GPU);
}
//! Destructor for dense matrix class object
/*!
Called automatically when the dense matrix object is destroyed. It deallocates the acquired resources, if any.
*/
Dense_Matrix::~Dense_Matrix()
{
if (ExistsCPU() == true)
Deallocate_Memory(LOCATION::CPU);
if (ExistsGPU() == true)
Deallocate_Memory(LOCATION::GPU);
}
//! Copies dense matrix values from CPU to GPU
/*!
Allocates memory on GPU if required. Copies all values from CPU memory to GPU.
*/
void Dense_Matrix::CopyMatrix_cpu_to_gpu()
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
hipMemcpy(GetGPUValues(), GetCPUValues(), GetLda() * GetCols() * sizeof(DoubleComplex), hipMemcpyHostToDevice);
}
//! Copies dense matrix values from GPU to CPU
/*!
Allocates memory on CPU if required. Copies all values from GPU memory to CPU.
*/
void Dense_Matrix::CopyMatrix_gpu_to_cpu()
{
assert(ExistsGPU() == true);
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
hipMemcpy(GetCPUValues(), GetGPUValues(), GetLda() * GetCols() * sizeof(DoubleComplex), hipMemcpyDeviceToHost);
}
//! Copies a part of a dense matrix values from CPU to GPU
/*!
Allocates memory on GPU if required. Copies a part of matrix defined by starting and ending column and row indices from CPU memory to GPU memory
\param[in] col_start index of the starting column
\param[in] col_end index of the ending column
\param[in] row_start index of the starting row
\param[in] row_end index of the ending row
*/
void Dense_Matrix::CopyMatrix_cpu_to_gpu(int col_start, int col_end, int row_start, int row_end)//write version for submatrix -copy
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
int N = row_end - row_start + 1;
DoubleComplex* cpu_val, * gpu_val;
for (int i = col_start; i <= col_end; i++)
{
cpu_val = GetSpecificLocationPtrCPU(row_start, i);
gpu_val = GetSpecificLocationPtrGPU(row_start, i);
hipMemcpy(gpu_val, cpu_val, N * sizeof(DoubleComplex), hipMemcpyHostToDevice);
}
}
//! Copies a part of a dense matrix values from GPU to CPU
/*!
Allocates memory on CPU if required. Copies a part of matrix defined by starting and ending column and row indices from GPU memory to CPU memory
\param[in] col_start index of the starting column
\param[in] col_end index of the ending column
\param[in] row_start index of the starting row
\param[in] row_end index of the ending row
*/
void Dense_Matrix::CopyMatrix_gpu_to_cpu(int col_start, int col_end, int row_start, int row_end)
{
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
int N = row_end - row_start + 1;
DoubleComplex* cpu_val, * gpu_val;
for (int i = col_start; i <= col_end; i++)
{
cpu_val = GetSpecificLocationPtrCPU(row_start, i);
gpu_val = GetSpecificLocationPtrGPU(row_start, i);
hipMemcpy(cpu_val, gpu_val, N * sizeof(DoubleComplex), hipMemcpyDeviceToHost);
}
}
//! Deallocates specified location's resources of the dense matrix object
/*!
\param[in] loc enum type varaible which indicates the location(CPU/GPU) where the resources are to be dealloacted
*/
void Dense_Matrix::Deallocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == true)
{
delete[] cpu_values;
cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
cpu_values = nullptr;
}
if (loc == LOCATION::GPU && ExistsGPU() == true)
{
hipFree(gpu_values);
gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
gpu_values = nullptr;
}
}
//-------------------------------------------------------------------------------------------------------------
/* CSR Matrix member functions */
//! Allocates memory on the specified location to store CSR matirx's internal ararys
/*!
Allocates memory based on the number of rows and non zero elements of the CSR matrix object
\param[in] loc enum type which indicates the location -either GPU or CPU, where the CSR matrix related arrays are to be stored
*/
void CSR_Matrix::Allocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == false)
{
cpu_values = new DoubleComplex[Getnz()];
cpu_row_ptr = new int[GetRows() + 1];
cpu_col_ind = new int[Getnz()];
cpu_exists = CPU_EXISTENCE::EXISTENT;
}
else if (loc == LOCATION::GPU && ExistsGPU() == false)
{
hipMalloc((void**)&gpu_values, Getnz() * sizeof(DoubleComplex));
hipMalloc((void**)&gpu_row_ptr, (GetRows() + 1) * sizeof(int));
hipMalloc((void**)&gpu_col_ind, Getnz() * sizeof(int));
gpu_exists = GPU_EXISTENCE::EXISTENT;
}
}
//! A parameterized constructor for CSR matrix class
/*!
\param[in] num_rows number of rows in CSR matrix object being formed
\param[in] num_cols number of columns in CSR matrix object being formed
\param[in] nz_mat number of non zero elements in the CSR matrix object being formed
\param[in] cpu_exists enum type variable which indicates if memory is to be allocated on CPU for the object's internals(CSR matrix arrays) being formed
\param[in] gpu_exists enum type variable which indicates if memory is to be allocated on GPU for the object's internals(CSR matrix arrays) being formed
*/
CSR_Matrix::CSR_Matrix(const int num_rows, const int num_cols, const int nz_mat, const CPU_EXISTENCE cpu_exists, const GPU_EXISTENCE gpu_exists)
: rows{ num_rows }, cols{ num_cols }, nz{ nz_mat }
{
if (cpu_exists == CPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::CPU);
if (gpu_exists == GPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::GPU);
}
//! Destructor for CSR matrix class object
/*!
Called automatically when the CSR matrix object is destroyed. It deallocates the acquired resources, if any.
*/
CSR_Matrix::~CSR_Matrix()
{
if (ExistsCPU() == true)
Deallocate_Memory(LOCATION::CPU);
if (ExistsGPU() == true)
Deallocate_Memory(LOCATION::GPU);
}
//! Deallocates specified location's resources of the CSR matrix object
/*!
\param[in] loc enum type varaible which indicates the location(CPU/GPU) where the resources are to be dealloacted
*/
void CSR_Matrix::Deallocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == true)
{
delete[] cpu_values;
delete[] cpu_col_ind;
delete[] cpu_row_ptr;
cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
cpu_values = nullptr;
cpu_col_ind = nullptr;
cpu_row_ptr = nullptr;
}
if (loc == LOCATION::GPU && ExistsGPU() == true)
{
hipFree(gpu_values);
hipFree(gpu_row_ptr);
hipFree(gpu_col_ind);
gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
gpu_values = nullptr;
gpu_row_ptr = nullptr;
gpu_col_ind = nullptr;
}
}
//! Copies CSR matrix internal arrays from CPU to GPU
/*!
Allocates memory on GPU if required. Copies all internal arrays from CPU memory to GPU.
*/
void CSR_Matrix::CopyMatrix_cpu_to_gpu()
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
hipMemcpy(GetGPUValues(), GetCPUValues(), Getnz() * sizeof(DoubleComplex), hipMemcpyHostToDevice);
hipMemcpy(GetGPURowPtr(), GetCPURowPtr(), (GetRows() + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(GetGPUColInd() , GetCPUColInd(), Getnz() * sizeof(int), hipMemcpyHostToDevice);
}
//! Copies CSR matrix internal arrays from GPU to CPU
/*!
Allocates memory on CPU if required. Copies all internal arrays from GPU memory to CPU.
*/
void CSR_Matrix::CopyMatrix_gpu_to_cpu()
{
assert(ExistsGPU() == true);
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
hipMemcpy(GetCPUValues() , GetGPUValues() , Getnz() * sizeof(DoubleComplex), hipMemcpyDeviceToHost);
hipMemcpy(GetCPURowPtr(), GetGPURowPtr() , (GetRows() + 1) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy( GetCPUColInd(), GetGPUColInd(), Getnz() * sizeof(int), hipMemcpyDeviceToHost);
}
//-------------------------------------------------------------------------------------------------------------------
/* Member functions for COO matrix class */
//! Allocates memory on the specified location to store COO matirx internal ararys
/*!
Allocates memory based on the number of non zero elements of the COO matrix object
\param[in] loc enum type which indicates the location -either GPU or CPU, where the COO matrix related arrays are to be stored
*/
void COO_Matrix::Allocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == false)
{
cpu_values = new DoubleComplex[Getnz()];
cpu_row_ind = new int[Getnz()];
cpu_col_ind = new int[Getnz()];
cpu_exists = CPU_EXISTENCE::EXISTENT;
}
else if (loc == LOCATION::GPU && ExistsGPU()== false)
{
hipMalloc((void**)&gpu_values, Getnz() * sizeof(DoubleComplex));
hipMalloc((void**)&gpu_row_ind, Getnz() * sizeof(int));
hipMalloc((void**)&gpu_col_ind, Getnz() * sizeof(int));
gpu_exists = GPU_EXISTENCE::EXISTENT;
}
}
//! A parameterized constructor for COO matrix class
/*!
\param[in] num_rows number of rows in COO matrix object being formed
\param[in] num_cols number of columns in COO matrix object being formed
\param[in] mat_nz number of non zero elements in the COO matrix object being formed
\param[in] cpu_exists enum type variable which indicates if memory is to be allocated on CPU for the object's internals(COO matrix arrays) being formed
\param[in] gpu_exists enum type variable which indicates if memory is to be allocated on GPU for the object's internals(COO matrix arrays) being formed
*/
COO_Matrix::COO_Matrix(const int num_rows, const int num_cols, const int mat_nz, const CPU_EXISTENCE cpu_exists, const GPU_EXISTENCE gpu_exists)
: rows{ num_rows }, cols{ num_cols }, nz{ mat_nz }
{
if (cpu_exists == CPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::CPU);
if (gpu_exists == GPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::GPU);
}
//! Destructor for COO matrix class object
/*!
Called automatically when the COO matrix object is destroyed. It deallocates the acquired resources, if any.
*/
COO_Matrix::~COO_Matrix()
{
if (ExistsCPU() == true)
Deallocate_Memory(LOCATION::CPU);
if (ExistsGPU() == true)
Deallocate_Memory(LOCATION::GPU);
}
//! Deallocates specified location's resources of the COO matrix object
/*!
\param[in] loc enum type varaible which indicates the location(CPU/GPU) where the resources are to be dealloacted
*/
void COO_Matrix::Deallocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == true)
{
delete[] cpu_values;
delete[] cpu_col_ind;
delete[] cpu_row_ind;
cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
cpu_values = nullptr;
cpu_col_ind = nullptr;;
cpu_row_ind = nullptr;
}
if (loc == LOCATION::GPU && ExistsGPU() == true)
{
hipFree(gpu_values);
hipFree(gpu_row_ind);
hipFree(gpu_col_ind);
gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
gpu_values = nullptr;
gpu_row_ind = nullptr;
gpu_col_ind = nullptr;
}
}
//! Copies COO matrix internal arrays from CPU to GPU
/*!
Allocates memory on GPU if required. Copies all internal arrays from CPU memory to GPU.
*/
void COO_Matrix::CopyMatrix_cpu_to_gpu()
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
hipMemcpy(GetGPUValues(), GetCPUValues(), Getnz() * sizeof(DoubleComplex), hipMemcpyHostToDevice);
hipMemcpy(GetGPURowInd(), GetCPURowInd(), Getnz() * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(GetGPUColInd(), GetCPUColInd(), Getnz() * sizeof(int), hipMemcpyHostToDevice);
}
//! Copies COO matrix internal arrays from GPU to CPU
/*!
Allocates memory on CPU if required. Copies all internal arrays from GPU memory to CPU.
*/
void COO_Matrix::CopyMatrix_gpu_to_cpu()
{
assert(ExistsGPU() == true);
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
hipMemcpy(GetCPUValues(), GetGPUValues(), Getnz() * sizeof(DoubleComplex), hipMemcpyDeviceToHost);
hipMemcpy(GetCPURowInd(), GetGPURowInd() , Getnz() * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(GetCPUColInd(), GetGPUColInd(), Getnz() * sizeof(int), hipMemcpyDeviceToHost);
} | 9554134efa2c7198bc449c8a75f2a34fbba10832.cu | /*!
\file Matrix.cu
\brief Implementation of member functions of classes Dense_Matrix, CSR_Matrix and COO_Matrix
*/
#include <cassert>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Matrix.h"
#include "kernels.h"
#include "header.h"
//----------------------------------------------------------------------------------------------------------------------
/* Member functions for Dense matrix class*/
//! Move assignment operator for Dense Matrix class
/*!
It moves the CPU and GPU resources/state of the temporary object(input) into the current class object.
\param[in,out] mat rvalue reference which binds to dense matrix class temporary object
\return reference(lvalue) to the current object
*/
Dense_Matrix& Dense_Matrix::operator= (Dense_Matrix&& mat)
{
assert(GetRows() == mat.GetRows());
assert(GetCols() == mat.GetCols());
assert(GetLda() == mat.GetLda());
assert(GetOrder() == mat.GetOrder());
cpu_exists = mat.cpu_exists;
cpu_values = mat.cpu_values;
mat.cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
mat.cpu_values = nullptr;
gpu_exists = mat.gpu_exists;
gpu_values = mat.gpu_values;
mat.gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
mat.gpu_values = nullptr;
return *this;
}
//! Copy assignment operator for dense matrix class
/*!
This copies the resources/state of the input object(lvalue reference) to the current class object.
\param[in] mat lvalue reference which binds to an lvalue -dense matrix class object
\return reference(lvalue) to the current object
*/
Dense_Matrix& Dense_Matrix::operator= (const Dense_Matrix& mat)
{
assert(GetRows() == mat.GetRows());
assert(GetCols() == mat.GetCols());
assert(GetLda() == mat.GetLda());
assert(GetOrder() == mat.GetOrder());
if (mat.ExistsCPU() == true)
{
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
Copy_array_cpu_to_cpu(mat.GetCPUValues(), GetCPUValues(), GetLda() * GetCols());
}
else
Deallocate_Memory(LOCATION::CPU);
if (mat.ExistsGPU() == true)
{
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
Copy_array_gpu_to_gpu(mat.GetGPUValues(), GetGPUValues(), GetLda() * GetCols());
}
else
Deallocate_Memory(LOCATION::GPU);
return *this;
}
//! Move constructor for dense matrix class
/*!
Forms a dense matrix object by moving CPU and GPU resources/state of a temporary object into it
\param[in,out] mat rvalue reference which binds to dense matrix class temporary object
*/
Dense_Matrix::Dense_Matrix(Dense_Matrix&& mat)
: rows{ mat.rows }, cols{ mat.cols }, lda{ mat.lda }, order{ mat.order }
{
cpu_exists = mat.cpu_exists;
cpu_values = mat.cpu_values;
mat.cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
mat.cpu_values = nullptr;
gpu_exists = mat.gpu_exists;
gpu_values = mat.gpu_values;
mat.gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
mat.gpu_values = nullptr;
}
//! Copy constructor for dense matrix class
/*!
Forms a dense matrix object by copying CPU and GPU resources/state of the input(lvalue reference) into it
\param[in] mat lvalue reference which binds to an lvalue -dense matrix class object
*/
Dense_Matrix::Dense_Matrix(const Dense_Matrix& mat)
: rows{ mat.rows }, cols{ mat.cols }, lda{ mat.lda }, order{ mat.order }
{
if (mat.ExistsCPU() == true)
{
Allocate_Memory(LOCATION::CPU);
Copy_array_cpu_to_cpu(mat.cpu_values, this->cpu_values, lda * cols);
}
if (mat.ExistsGPU() == true)
{
Allocate_Memory(LOCATION::GPU);
Copy_array_gpu_to_gpu(mat.gpu_values, this->gpu_values, lda * cols);
}
}
//! Allocates memory on the specified location to store dense matirx's internal ararys
/*!
Allocates memory on the specified location based on the leading dimension and the number of colums of the dense matrix object
\param[in] loc enum type which indicates the location -either GPU or CPU, where the dense matrix values are to be stored
*/
void Dense_Matrix::Allocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == false)
{
cpu_values = new DoubleComplex[GetLda() * GetCols()];
cpu_exists = CPU_EXISTENCE::EXISTENT;
}
else if (loc == LOCATION::GPU && ExistsGPU() == false)
{
cudaMalloc((void**)&gpu_values, GetLda() * GetCols() * sizeof(DoubleComplex));
gpu_exists = GPU_EXISTENCE::EXISTENT;
}
}
//! A parameterized constructor for dense matrix class
/*!
\param[in] num_rows number of rows in dense matrix object being formed
\param[in] num_cols number of columns in dense matrix object being formed
\param[in] lda_mat leading dimension of the dense matrix object being formed.(Usually the number of rows is rounded up to a certain value to give lda.)
\param[in] order_mat storage order of the dense matrix object being formed
\param[in] cpu_exists enum type variable which indicates if memory is to be allocated on CPU for the object's internals(the values array) being formed
\param[in] gpu_exists enum type variable which indicates if memory is to be allocated on GPU for the object's internals(the values array) being formed
*/
Dense_Matrix::Dense_Matrix(const int num_rows, const int num_cols, const int lda_mat, const ORDER order_mat, const CPU_EXISTENCE cpu_exists, const GPU_EXISTENCE gpu_exists)
: rows{ num_rows }, cols{ num_cols }, lda{ lda_mat }, order{ order_mat }
{
assert(lda >= rows);
if (cpu_exists == CPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::CPU);
if (gpu_exists == GPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::GPU);
}
//! Destructor for dense matrix class object
/*!
Called automatically when the dense matrix object is destroyed. It deallocates the acquired resources, if any.
*/
Dense_Matrix::~Dense_Matrix()
{
if (ExistsCPU() == true)
Deallocate_Memory(LOCATION::CPU);
if (ExistsGPU() == true)
Deallocate_Memory(LOCATION::GPU);
}
//! Copies dense matrix values from CPU to GPU
/*!
Allocates memory on GPU if required. Copies all values from CPU memory to GPU.
*/
void Dense_Matrix::CopyMatrix_cpu_to_gpu()
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
cudaMemcpy(GetGPUValues(), GetCPUValues(), GetLda() * GetCols() * sizeof(DoubleComplex), cudaMemcpyHostToDevice);
}
//! Copies dense matrix values from GPU to CPU
/*!
Allocates memory on CPU if required. Copies all values from GPU memory to CPU.
*/
void Dense_Matrix::CopyMatrix_gpu_to_cpu()
{
assert(ExistsGPU() == true);
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
cudaMemcpy(GetCPUValues(), GetGPUValues(), GetLda() * GetCols() * sizeof(DoubleComplex), cudaMemcpyDeviceToHost);
}
//! Copies a part of a dense matrix values from CPU to GPU
/*!
Allocates memory on GPU if required. Copies a part of matrix defined by starting and ending column and row indices from CPU memory to GPU memory
\param[in] col_start index of the starting column
\param[in] col_end index of the ending column
\param[in] row_start index of the starting row
\param[in] row_end index of the ending row
*/
void Dense_Matrix::CopyMatrix_cpu_to_gpu(int col_start, int col_end, int row_start, int row_end)//write version for submatrix -copy
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
int N = row_end - row_start + 1;
DoubleComplex* cpu_val, * gpu_val;
for (int i = col_start; i <= col_end; i++)
{
cpu_val = GetSpecificLocationPtrCPU(row_start, i);
gpu_val = GetSpecificLocationPtrGPU(row_start, i);
cudaMemcpy(gpu_val, cpu_val, N * sizeof(DoubleComplex), cudaMemcpyHostToDevice);
}
}
//! Copies a part of a dense matrix values from GPU to CPU
/*!
Allocates memory on CPU if required. Copies a part of matrix defined by starting and ending column and row indices from GPU memory to CPU memory
\param[in] col_start index of the starting column
\param[in] col_end index of the ending column
\param[in] row_start index of the starting row
\param[in] row_end index of the ending row
*/
void Dense_Matrix::CopyMatrix_gpu_to_cpu(int col_start, int col_end, int row_start, int row_end)
{
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
int N = row_end - row_start + 1;
DoubleComplex* cpu_val, * gpu_val;
for (int i = col_start; i <= col_end; i++)
{
cpu_val = GetSpecificLocationPtrCPU(row_start, i);
gpu_val = GetSpecificLocationPtrGPU(row_start, i);
cudaMemcpy(cpu_val, gpu_val, N * sizeof(DoubleComplex), cudaMemcpyDeviceToHost);
}
}
//! Deallocates specified location's resources of the dense matrix object
/*!
\param[in] loc enum type varaible which indicates the location(CPU/GPU) where the resources are to be dealloacted
*/
void Dense_Matrix::Deallocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == true)
{
delete[] cpu_values;
cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
cpu_values = nullptr;
}
if (loc == LOCATION::GPU && ExistsGPU() == true)
{
cudaFree(gpu_values);
gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
gpu_values = nullptr;
}
}
//-------------------------------------------------------------------------------------------------------------
/* CSR Matrix member functions */
//! Allocates memory on the specified location to store CSR matirx's internal ararys
/*!
Allocates memory based on the number of rows and non zero elements of the CSR matrix object
\param[in] loc enum type which indicates the location -either GPU or CPU, where the CSR matrix related arrays are to be stored
*/
void CSR_Matrix::Allocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == false)
{
cpu_values = new DoubleComplex[Getnz()];
cpu_row_ptr = new int[GetRows() + 1];
cpu_col_ind = new int[Getnz()];
cpu_exists = CPU_EXISTENCE::EXISTENT;
}
else if (loc == LOCATION::GPU && ExistsGPU() == false)
{
cudaMalloc((void**)&gpu_values, Getnz() * sizeof(DoubleComplex));
cudaMalloc((void**)&gpu_row_ptr, (GetRows() + 1) * sizeof(int));
cudaMalloc((void**)&gpu_col_ind, Getnz() * sizeof(int));
gpu_exists = GPU_EXISTENCE::EXISTENT;
}
}
//! A parameterized constructor for CSR matrix class
/*!
\param[in] num_rows number of rows in CSR matrix object being formed
\param[in] num_cols number of columns in CSR matrix object being formed
\param[in] nz_mat number of non zero elements in the CSR matrix object being formed
\param[in] cpu_exists enum type variable which indicates if memory is to be allocated on CPU for the object's internals(CSR matrix arrays) being formed
\param[in] gpu_exists enum type variable which indicates if memory is to be allocated on GPU for the object's internals(CSR matrix arrays) being formed
*/
CSR_Matrix::CSR_Matrix(const int num_rows, const int num_cols, const int nz_mat, const CPU_EXISTENCE cpu_exists, const GPU_EXISTENCE gpu_exists)
: rows{ num_rows }, cols{ num_cols }, nz{ nz_mat }
{
if (cpu_exists == CPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::CPU);
if (gpu_exists == GPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::GPU);
}
//! Destructor for CSR matrix class object
/*!
Called automatically when the CSR matrix object is destroyed. It deallocates the acquired resources, if any.
*/
CSR_Matrix::~CSR_Matrix()
{
if (ExistsCPU() == true)
Deallocate_Memory(LOCATION::CPU);
if (ExistsGPU() == true)
Deallocate_Memory(LOCATION::GPU);
}
//! Deallocates specified location's resources of the CSR matrix object
/*!
\param[in] loc enum type varaible which indicates the location(CPU/GPU) where the resources are to be dealloacted
*/
void CSR_Matrix::Deallocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == true)
{
delete[] cpu_values;
delete[] cpu_col_ind;
delete[] cpu_row_ptr;
cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
cpu_values = nullptr;
cpu_col_ind = nullptr;
cpu_row_ptr = nullptr;
}
if (loc == LOCATION::GPU && ExistsGPU() == true)
{
cudaFree(gpu_values);
cudaFree(gpu_row_ptr);
cudaFree(gpu_col_ind);
gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
gpu_values = nullptr;
gpu_row_ptr = nullptr;
gpu_col_ind = nullptr;
}
}
//! Copies CSR matrix internal arrays from CPU to GPU
/*!
Allocates memory on GPU if required. Copies all internal arrays from CPU memory to GPU.
*/
void CSR_Matrix::CopyMatrix_cpu_to_gpu()
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
cudaMemcpy(GetGPUValues(), GetCPUValues(), Getnz() * sizeof(DoubleComplex), cudaMemcpyHostToDevice);
cudaMemcpy(GetGPURowPtr(), GetCPURowPtr(), (GetRows() + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(GetGPUColInd() , GetCPUColInd(), Getnz() * sizeof(int), cudaMemcpyHostToDevice);
}
//! Copies CSR matrix internal arrays from GPU to CPU
/*!
Allocates memory on CPU if required. Copies all internal arrays from GPU memory to CPU.
*/
void CSR_Matrix::CopyMatrix_gpu_to_cpu()
{
assert(ExistsGPU() == true);
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
cudaMemcpy(GetCPUValues() , GetGPUValues() , Getnz() * sizeof(DoubleComplex), cudaMemcpyDeviceToHost);
cudaMemcpy(GetCPURowPtr(), GetGPURowPtr() , (GetRows() + 1) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy( GetCPUColInd(), GetGPUColInd(), Getnz() * sizeof(int), cudaMemcpyDeviceToHost);
}
//-------------------------------------------------------------------------------------------------------------------
/* Member functions for COO matrix class */
//! Allocates memory on the specified location to store COO matirx internal ararys
/*!
Allocates memory based on the number of non zero elements of the COO matrix object
\param[in] loc enum type which indicates the location -either GPU or CPU, where the COO matrix related arrays are to be stored
*/
void COO_Matrix::Allocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == false)
{
cpu_values = new DoubleComplex[Getnz()];
cpu_row_ind = new int[Getnz()];
cpu_col_ind = new int[Getnz()];
cpu_exists = CPU_EXISTENCE::EXISTENT;
}
else if (loc == LOCATION::GPU && ExistsGPU()== false)
{
cudaMalloc((void**)&gpu_values, Getnz() * sizeof(DoubleComplex));
cudaMalloc((void**)&gpu_row_ind, Getnz() * sizeof(int));
cudaMalloc((void**)&gpu_col_ind, Getnz() * sizeof(int));
gpu_exists = GPU_EXISTENCE::EXISTENT;
}
}
//! A parameterized constructor for COO matrix class
/*!
\param[in] num_rows number of rows in COO matrix object being formed
\param[in] num_cols number of columns in COO matrix object being formed
\param[in] mat_nz number of non zero elements in the COO matrix object being formed
\param[in] cpu_exists enum type variable which indicates if memory is to be allocated on CPU for the object's internals(COO matrix arrays) being formed
\param[in] gpu_exists enum type variable which indicates if memory is to be allocated on GPU for the object's internals(COO matrix arrays) being formed
*/
COO_Matrix::COO_Matrix(const int num_rows, const int num_cols, const int mat_nz, const CPU_EXISTENCE cpu_exists, const GPU_EXISTENCE gpu_exists)
: rows{ num_rows }, cols{ num_cols }, nz{ mat_nz }
{
if (cpu_exists == CPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::CPU);
if (gpu_exists == GPU_EXISTENCE::EXISTENT)
Allocate_Memory(LOCATION::GPU);
}
//! Destructor for COO matrix class object
/*!
Called automatically when the COO matrix object is destroyed. It deallocates the acquired resources, if any.
*/
COO_Matrix::~COO_Matrix()
{
if (ExistsCPU() == true)
Deallocate_Memory(LOCATION::CPU);
if (ExistsGPU() == true)
Deallocate_Memory(LOCATION::GPU);
}
//! Deallocates specified location's resources of the COO matrix object
/*!
\param[in] loc enum type varaible which indicates the location(CPU/GPU) where the resources are to be dealloacted
*/
void COO_Matrix::Deallocate_Memory(const LOCATION loc)
{
if (loc == LOCATION::CPU && ExistsCPU() == true)
{
delete[] cpu_values;
delete[] cpu_col_ind;
delete[] cpu_row_ind;
cpu_exists = CPU_EXISTENCE::NON_EXISTENT;
cpu_values = nullptr;
cpu_col_ind = nullptr;;
cpu_row_ind = nullptr;
}
if (loc == LOCATION::GPU && ExistsGPU() == true)
{
cudaFree(gpu_values);
cudaFree(gpu_row_ind);
cudaFree(gpu_col_ind);
gpu_exists = GPU_EXISTENCE::NON_EXISTENT;
gpu_values = nullptr;
gpu_row_ind = nullptr;
gpu_col_ind = nullptr;
}
}
//! Copies COO matrix internal arrays from CPU to GPU
/*!
Allocates memory on GPU if required. Copies all internal arrays from CPU memory to GPU.
*/
void COO_Matrix::CopyMatrix_cpu_to_gpu()
{
assert(ExistsCPU() == true);
if (ExistsGPU() == false)
Allocate_Memory(LOCATION::GPU);
cudaMemcpy(GetGPUValues(), GetCPUValues(), Getnz() * sizeof(DoubleComplex), cudaMemcpyHostToDevice);
cudaMemcpy(GetGPURowInd(), GetCPURowInd(), Getnz() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(GetGPUColInd(), GetCPUColInd(), Getnz() * sizeof(int), cudaMemcpyHostToDevice);
}
//! Copies COO matrix internal arrays from GPU to CPU
/*!
Allocates memory on CPU if required. Copies all internal arrays from GPU memory to CPU.
*/
void COO_Matrix::CopyMatrix_gpu_to_cpu()
{
assert(ExistsGPU() == true);
if (ExistsCPU() == false)
Allocate_Memory(LOCATION::CPU);
cudaMemcpy(GetCPUValues(), GetGPUValues(), Getnz() * sizeof(DoubleComplex), cudaMemcpyDeviceToHost);
cudaMemcpy(GetCPURowInd(), GetGPURowInd() , Getnz() * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(GetCPUColInd(), GetGPUColInd(), Getnz() * sizeof(int), cudaMemcpyDeviceToHost);
} |
140ffc684788bd35c5b9ba1d234a756393e97d3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone <johns@ks.uiuc.edu>
* http://www.ks.uiuc.edu/~johns/
*
* Coulombic potential grid calculation microbenchmark based on the time
* consuming portions of the 'cionize' ion placement tool.
*/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuenergy.h"
/* initatoms()
* Store a pseudorandom arrangement of point charges in *atombuf.
*/
static int
initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
dim3 size;
int i;
float *atoms;
srand(54321); // Ensure that atom placement is repeatable
atoms = (float *) malloc(count * 4 * sizeof(float));
*atombuf = atoms;
// compute grid dimensions in angstroms
size.x = gridspacing * volsize.x;
size.y = gridspacing * volsize.y;
size.z = gridspacing * volsize.z;
for (i=0; i<count; i++) {
int addr = i * 4;
atoms[addr ] = (rand() / (float) RAND_MAX) * size.x;
atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0; // charge
}
return 0;
}
/* writeenergy()
* Write part of the energy array to an output file for verification.
*/
static int
writeenergy(char *filename, float *energy, dim3 volsize)
{
FILE *outfile;
int x, y;
outfile = fopen(filename, "w");
if (outfile == NULL) {
fputs("Cannot open output file\n", stderr);
return -1;
}
/* Print the execution parameters */
fprintf(outfile, "%d %d %d %d\n", volsize.x, volsize.y, volsize.z, ATOMCOUNT);
/* Print a checksum */
{
double sum = 0.0;
for (y = 0; y < volsize.y; y++) {
for (x = 0; x < volsize.x; x++) {
double t = energy[y*volsize.x+x];
t = fmax(-20.0, fmin(20.0, t));
sum += t;
}
}
fprintf(outfile, "%.4g\n", sum);
}
/* Print several rows of the computed data */
for (y = 0; y < 17; y++) {
for (x = 0; x < volsize.x; x++) {
int addr = y * volsize.x + x;
fprintf(outfile, "%.4g ", energy[addr]);
}
fprintf(outfile, "\n");
}
fclose(outfile);
return 0;
}
int main(int argc, char** argv) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
float *energy = NULL; // Output of device calculation
float *atoms = NULL;
dim3 volsize, Gsz, Bsz;
// int final_iteration_count;
// number of atoms to simulate
int atomcount = ATOMCOUNT;
// voxel spacing
const float gridspacing = 0.1;
// Size of buffer on GPU
int volmemsz;
printf("CUDA accelerated coulombic potential microbenchmark\n");
printf("Original version by John E. Stone <johns@ks.uiuc.edu>\n");
printf("This version maintained by Chris Rodrigues\n");
parameters = pb_ReadParameters(&argc, argv);
if (!parameters)
return -1;
if (parameters->inpFiles[0]) {
fputs("No input files expected\n", stderr);
return -1;
}
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
// setup energy grid size
volsize.x = VOLSIZEX;
volsize.y = VOLSIZEY;
volsize.z = 1;
// setup CUDA grid and block sizes
Bsz.x = BLOCKSIZEX; // each thread does multiple Xs
Bsz.y = BLOCKSIZEY;
Bsz.z = 1;
Gsz.x = volsize.x / (Bsz.x * UNROLLX); // each thread does multiple Xs
Gsz.y = volsize.y / Bsz.y;
Gsz.z = volsize.z / Bsz.z;
#if 0
printf("Grid size: %d x %d x %d\n", volsize.x, volsize.y, volsize.z);
printf("Running kernel(atoms:%d, gridspacing %g, z %d)\n", atomcount, gridspacing, 0);
#endif
// allocate and initialize atom coordinates and charges
if (initatoms(&atoms, atomcount, volsize, gridspacing))
return -1;
// allocate and initialize the GPU output array
volmemsz = sizeof(float) * volsize.x * volsize.y * volsize.z;
// Main computation
{
float *d_output = NULL; // Output on device
int iterations=0;
int atomstart;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMalloc((void**)&d_output, volmemsz);
CUERR // check and clear any existing errors
hipMemset(d_output, 0, volmemsz);
CUERR // check and clear any existing errors
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
for (atomstart=0; atomstart<atomcount; atomstart+=MAXATOMS) {
int atomsremaining = atomcount - atomstart;
int runatoms = (atomsremaining > MAXATOMS) ? MAXATOMS : atomsremaining;
iterations++;
// copy the atoms to the GPU
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
if (copyatomstoconstbuf(atoms + 4*atomstart, runatoms, 0*gridspacing))
return -1;
if (parameters->synchronizeGpu) hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_GPU);
// RUN the kernel...
hipLaunchKernelGGL(( cenergy), dim3(Gsz), dim3(Bsz), 0, 0, runatoms, 0.1, d_output);
CUERR // check and clear any existing errors
if (parameters->synchronizeGpu) hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
// final_iteration_count = iterations;
}
#if 0
printf("Done\n");
#endif
// Copy the GPU output data back to the host and use/store it..
energy = (float *) malloc(volmemsz);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMemcpy(energy, d_output, volmemsz, hipMemcpyDeviceToHost);
CUERR // check and clear any existing errors
hipFree(d_output);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
/* Print a subset of the results to a file */
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
if (writeenergy(parameters->outFile, energy, volsize) == -1)
return -1;
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
if( pb_compareFiles(parameters->outFile, "data/ref.txt", 1) )
{
printf("TEST PASSED\n");
}
else
{
printf("TEST FAILED\n");
}
free(atoms);
free(energy);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
return 0;
}
| 140ffc684788bd35c5b9ba1d234a756393e97d3f.cu | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone <johns@ks.uiuc.edu>
* http://www.ks.uiuc.edu/~johns/
*
* Coulombic potential grid calculation microbenchmark based on the time
* consuming portions of the 'cionize' ion placement tool.
*/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuenergy.h"
/* initatoms()
 * Store a pseudorandom arrangement of point charges in *atombuf.
 * Layout is AoS with stride 4: (x, y, z, charge) per atom.
 * Returns 0 on success, -1 if the host allocation fails.
 */
static int
initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
  dim3 size;
  int i;
  float *atoms;

  srand(54321);  // Ensure that atom placement is repeatable

  atoms = (float *) malloc(count * 4 * sizeof(float));
  if (atoms == NULL) {  // bug fix: previously unchecked; callers test nonzero return
    *atombuf = NULL;
    return -1;
  }
  *atombuf = atoms;

  // compute grid dimensions in angstroms
  size.x = gridspacing * volsize.x;
  size.y = gridspacing * volsize.y;
  size.z = gridspacing * volsize.z;

  for (i=0; i<count; i++) {
    int addr = i * 4;
    // Coordinates uniform in the grid box.  The rand() call order
    // (x, y, z, charge) must not change or "repeatable" placement changes.
    atoms[addr    ] = (rand() / (float) RAND_MAX) * size.x;
    atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
    atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
    atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0) - 1.0;  // charge in [-1, 1]
  }

  return 0;
}
/* writeenergy()
 * Write part of the energy array to an output file for verification.
 * Emits: grid dimensions + atom count, a clamped checksum, then the first
 * 17 rows of the grid.  Returns 0 on success, -1 if the file cannot open.
 */
static int
writeenergy(char *filename, float *energy, dim3 volsize)
{
  FILE *outfile;
  int x, y;

  outfile = fopen(filename, "w");
  if (outfile == NULL) {
    fputs("Cannot open output file\n", stderr);
    return -1;
  }

  /* Print the execution parameters
   * (bug fix: dim3 members are unsigned int, so use %u rather than %d) */
  fprintf(outfile, "%u %u %u %d\n", volsize.x, volsize.y, volsize.z, ATOMCOUNT);

  /* Print a checksum: every voxel clamped to [-20, 20] so one outlier
   * cannot dominate the comparison. */
  {
    double sum = 0.0;
    for (y = 0; y < volsize.y; y++) {
      for (x = 0; x < volsize.x; x++) {
        double t = energy[y*volsize.x+x];
        t = fmax(-20.0, fmin(20.0, t));
        sum += t;
      }
    }
    fprintf(outfile, "%.4g\n", sum);
  }

  /* Print several rows of the computed data */
  for (y = 0; y < 17; y++) {
    for (x = 0; x < volsize.x; x++) {
      int addr = y * volsize.x + x;
      fprintf(outfile, "%.4g ", energy[addr]);
    }
    fprintf(outfile, "\n");
  }

  fclose(outfile);
  return 0;
}
/* Benchmark driver.
 * Builds a repeatable pseudorandom atom set, accumulates the Coulombic
 * potential grid on the GPU in chunks of MAXATOMS atoms (the constant-memory
 * limit per launch), optionally writes a result subset, and compares it
 * against a reference file.  Returns 0 on success, -1 on failure. */
int main(int argc, char** argv) {
  struct pb_TimerSet timers;
  struct pb_Parameters *parameters;

  float *energy = NULL;  // Output of device calculation
  float *atoms = NULL;
  dim3 volsize, Gsz, Bsz;

  // int final_iteration_count;

  // number of atoms to simulate
  int atomcount = ATOMCOUNT;

  // voxel spacing
  const float gridspacing = 0.1;

  // Size of buffer on GPU
  int volmemsz;

  printf("CUDA accelerated coulombic potential microbenchmark\n");
  printf("Original version by John E. Stone <johns@ks.uiuc.edu>\n");
  printf("This version maintained by Chris Rodrigues\n");

  parameters = pb_ReadParameters(&argc, argv);
  if (!parameters)
    return -1;

  if (parameters->inpFiles[0]) {
    fputs("No input files expected\n", stderr);
    return -1;
  }

  pb_InitializeTimerSet(&timers);
  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  // setup energy grid size
  volsize.x = VOLSIZEX;
  volsize.y = VOLSIZEY;
  volsize.z = 1;

  // setup CUDA grid and block sizes
  // (assumes VOLSIZE* are exact multiples of the block/unroll factors)
  Bsz.x = BLOCKSIZEX;  // each thread does multiple Xs
  Bsz.y = BLOCKSIZEY;
  Bsz.z = 1;
  Gsz.x = volsize.x / (Bsz.x * UNROLLX);  // each thread does multiple Xs
  Gsz.y = volsize.y / Bsz.y;
  Gsz.z = volsize.z / Bsz.z;

#if 0
  printf("Grid size: %d x %d x %d\n", volsize.x, volsize.y, volsize.z);
  printf("Running kernel(atoms:%d, gridspacing %g, z %d)\n", atomcount, gridspacing, 0);
#endif

  // allocate and initialize atom coordinates and charges
  if (initatoms(&atoms, atomcount, volsize, gridspacing))
    return -1;

  // allocate and initialize the GPU output array
  volmemsz = sizeof(float) * volsize.x * volsize.y * volsize.z;

  // Main computation
  {
    float *d_output = NULL;  // Output on device
    int iterations = 0;
    int atomstart;

    pb_SwitchToTimer(&timers, pb_TimerID_COPY);
    cudaMalloc((void**)&d_output, volmemsz);
    CUERR  // check and clear any existing errors
    cudaMemset(d_output, 0, volmemsz);
    CUERR  // check and clear any existing errors
    pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

    // Process the atom list in constant-memory-sized chunks; each kernel
    // launch accumulates one chunk's contribution into d_output.
    for (atomstart = 0; atomstart < atomcount; atomstart += MAXATOMS) {
      int atomsremaining = atomcount - atomstart;
      int runatoms = (atomsremaining > MAXATOMS) ? MAXATOMS : atomsremaining;
      iterations++;

      // copy the atoms to the GPU (AoS: 4 floats per atom)
      pb_SwitchToTimer(&timers, pb_TimerID_COPY);
      if (copyatomstoconstbuf(atoms + 4*atomstart, runatoms, 0*gridspacing))
        return -1;
      // Bug fix: cudaThreadSynchronize() is deprecated; use the modern
      // cudaDeviceSynchronize() (identical semantics).
      if (parameters->synchronizeGpu) cudaDeviceSynchronize();
      pb_SwitchToTimer(&timers, pb_TimerID_GPU);

      // RUN the kernel...
      cenergy<<<Gsz, Bsz, 0>>>(runatoms, 0.1, d_output);
      CUERR  // check and clear any existing errors
      if (parameters->synchronizeGpu) cudaDeviceSynchronize();
      pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

      // final_iteration_count = iterations;
    }
#if 0
    printf("Done\n");
#endif

    // Copy the GPU output data back to the host and use/store it..
    energy = (float *) malloc(volmemsz);
    pb_SwitchToTimer(&timers, pb_TimerID_COPY);
    cudaMemcpy(energy, d_output, volmemsz, cudaMemcpyDeviceToHost);
    CUERR  // check and clear any existing errors

    cudaFree(d_output);
    pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  }

  /* Print a subset of the results to a file */
  if (parameters->outFile) {
    pb_SwitchToTimer(&timers, pb_TimerID_IO);
    if (writeenergy(parameters->outFile, energy, volsize) == -1)
      return -1;
    pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  }

  // NOTE(review): outFile may be NULL here; presumably pb_compareFiles
  // tolerates that -- confirm against the parboil harness.
  if( pb_compareFiles(parameters->outFile, "data/ref.txt", 1) )
  {
    printf("TEST PASSED\n");
  }
  else
  {
    printf("TEST FAILED\n");
  }

  free(atoms);
  free(energy);

  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);

  return 0;
}
|
e1ac1dd42c7c5cc7d8d9004d4e2a2199ef41dc56.hip | // !!! This is a file automatically generated by hipify!!!
/*
Based off work by Nelson, et al.
Brigham Young University (2010)
Adapted by Kevin Yuh (2015)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hipfft.h>
#define PI 3.14159265358979
/* Check errors on CUDA runtime functions */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP runtime call (file/line come from the gpuErrchk macro).
// Bug fix: the `abort` flag was accepted but ignored -- the process always
// exited.  Honor it so callers can opt into report-only behavior.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code != hipSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
/* Report any hipFFT status code that is not success. */
void gpuFFTchk(int errval){
    if (errval == HIPFFT_SUCCESS)
        return;
    printf("Failed FFT call, error code %d\n", errval);
}
/* Poll the runtime for the most recent launch error and report the outcome
 * (success or failure) on stderr either way. */
void checkCUDAKernelError()
{
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess) {
        fprintf(stderr, "No kernel error detected\n");
    } else {
        fprintf(stderr, "Error %s\n", hipGetErrorString(status));
    }
}
/* Basic ramp filter: scale each frequency bin linearly with its distance
 * from DC, mirrored about length/2.  In-place over `length` complex bins. */
__global__ void cudaFrequencyKernal(hipfftComplex *out_data, int length) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < length; i += stride) {
        // Ramp rises across the first half of the spectrum and falls across
        // the mirrored second half (peak amplitude sits at length/2).
        float gain;
        if (i < (length / 2)) {
            gain = ((float) i) / (length / 2);
        }
        else {
            gain = ((float) (length - i)) / (length / 2);
        }
        hipfftComplex bin = out_data[i];
        bin.x = bin.x * gain;
        bin.y = bin.y * gain;
        out_data[i] = bin;
    }
}
/* Copy the real component of each complex element into a float array;
 * the imaginary part is discarded. */
__global__ void cudaComplexToRealKernal(hipfftComplex *in_data,
                                        float *out_data,
                                        int length) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < length; i += stride) {
        hipfftComplex value = in_data[i];
        out_data[i] = value.x;
    }
}
/* Backproject the sinogram to an image.
 * in_data:  filtered sinogram, nAngles rows of sin_width samples.
 * out_data: square image (image_dim x image_dim); must be pre-zeroed, since
 *           each pixel accumulates one sinogram sample per angle.
 * Threads cover pixels with a grid-stride loop, so any launch config works. */
__global__ void cudaBackprojectionKernal(float *in_data, float *out_data,
                                         int nAngles, int sin_width,
                                         int image_dim) {
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < (image_dim * image_dim)) {
        // Get the pixel (x,y) coordinate from the index value
        int y_image = index / image_dim;
        int x_image = index % image_dim;
        // Get the geometric (x,y) coordinate from the pixel coordinate
        // (origin at the image center, y axis pointing up)
        int x_geo = x_image - (image_dim / 2);
        int y_geo = (image_dim / 2) - y_image;
        // For all theta in the sinogram...
        for (int i = 0; i < nAngles; i++) {
            float d;  // signed distance of the pixel's projection from the detector center
            // Handle the edges cases of theta = 0 and theta = PI/2, where the
            // slope arithmetic below would divide by zero
            if(i == 0) {
                d = (float) x_geo;
            }
            else if (i == nAngles / 2) {
                d = (float) y_geo;
            }
            else {
                float theta = PI * (((float) i) / ((float) nAngles));
                // m: slope of the detector line; the ray through the pixel is
                // perpendicular to it (slope -1/m)
                float m = -1 * cos(theta) / sin(theta);
                // (x_i, y_i): foot of the perpendicular from the pixel onto
                // the detector line through the origin
                float x_i = ((float) (y_geo - m * x_geo)) / ((-1 / m) - m);
                float y_i = (-1 / m) * x_i;
                d = sqrt((x_i * x_i) + (y_i * y_i));
                // Center the index: restore the sign that sqrt discarded
                if (((-1 / m) > 0 && x_i < 0) || ((-1 / m) < 0 && x_i > 0)) {
                    d *= -1;
                }
            }
            // d is the distance from the center line, so we need to offset d by
            // this much to index into the sinogram row
            d += sin_width / 2.0;
            d = truncf(d);
            // Now that we have d, add the right value to the image array
            out_data[y_image * image_dim + x_image] += in_data[i * sin_width + (int)d];
        }
        index += blockDim.x * gridDim.x;
    }
}
/* Driver: read a sinogram, ramp-filter it in the frequency domain with
 * hipFFT, back-project it to an image on the GPU, and write the image.
 * argv: sinogram file, image size, #angles, threads/block, #blocks, output.
 * Bug fix: the FFT plan handle was leaked; it is now destroyed after use. */
int main(int argc, char** argv){

    if (argc != 7){
        fprintf(stderr, "Incorrect number of arguments.\n\n");
        fprintf(stderr, "\nArguments: \n \
        < Sinogram filename > \n \
        < Width or height of original image, whichever is larger > \n \
        < Number of angles in sinogram >\n \
        < threads per block >\n \
        < number of blocks >\n \
        < output filename >\n");
        exit(EXIT_FAILURE);
    }

    /********** Parameters **********/

    int width = atoi(argv[2]);
    int height = width;
    // widest possible projection of a width x height image (its diagonal)
    int sinogram_width = (int)ceilf( height * sqrt(2) );

    int nAngles = atoi(argv[3]);

    int threadsPerBlock = atoi(argv[4]);
    int nBlocks = atoi(argv[5]);

    /********** Data storage *********/

    // GPU DATA STORAGE
    hipfftComplex *dev_sinogram_cmplx;
    float *dev_sinogram_float;
    float* output_dev;  // Image storage

    hipfftComplex *sinogram_host;

    size_t size_result = width*height*sizeof(float);
    float *output_host = (float *)malloc(size_result);

    /*********** Set up IO, Read in data ************/

    sinogram_host = (hipfftComplex *)malloc( sinogram_width*nAngles*sizeof(hipfftComplex) );

    FILE *dataFile = fopen(argv[1],"r");
    if (dataFile == NULL){
        fprintf(stderr, "Sinogram file missing\n");
        exit(EXIT_FAILURE);
    }

    FILE *outputFile = fopen(argv[6], "w");
    if (outputFile == NULL){
        fprintf(stderr, "Output file cannot be written\n");
        exit(EXIT_FAILURE);
    }

    int j, i;

    // read real-valued samples; imaginary parts start out zero
    for(i = 0; i < nAngles * sinogram_width; i++){
        fscanf(dataFile,"%f",&sinogram_host[i].x);
        sinogram_host[i].y = 0;
    }

    fclose(dataFile);

    /* Allocate GPU storage and copy the input sinogram over. */
    int sinogram_size = nAngles * sinogram_width;
    hipMalloc((void **) &dev_sinogram_cmplx, sizeof(hipfftComplex) * sinogram_size);
    hipMalloc((void **) &dev_sinogram_float, sizeof(float) * sinogram_size);

    hipMemcpy(dev_sinogram_cmplx, sinogram_host,
              sizeof(hipfftComplex) * sinogram_size,
              hipMemcpyHostToDevice);

    /* High-pass filter:
        - forward FFT (the whole sinogram as one 1D transform)
        - ramp-scale the frequencies
        - inverse FFT (note: hipFFT's inverse is unnormalized)
        - extract real components to floats
    */
    hipfftHandle plan;
    int batch = 1;
    hipfftPlan1d(&plan, sinogram_size, HIPFFT_C2C, batch);

    // Run the forward DFT
    hipfftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, HIPFFT_FORWARD);

    // Apply basic ramp filter
    hipLaunchKernelGGL(( cudaFrequencyKernal), dim3(nBlocks), dim3(threadsPerBlock), 0, 0,
        dev_sinogram_cmplx, sinogram_size);

    // Run the inverse DFT
    hipfftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, HIPFFT_BACKWARD);

    // Extract the real components to floats
    hipLaunchKernelGGL(( cudaComplexToRealKernal), dim3(nBlocks), dim3(threadsPerBlock), 0, 0,
        dev_sinogram_cmplx, dev_sinogram_float, sinogram_size);

    // Destroy the FFT plan (previously leaked), then free the original
    // complex sinogram, which is no longer needed.
    hipfftDestroy(plan);
    hipFree(dev_sinogram_cmplx);

    /* Backprojection:
        - allocate and zero the output image
        - accumulate every angle's contribution per pixel on the GPU
        - copy the reconstructed image back to output_host
        - free all remaining GPU memory
    */
    hipMalloc((void **) &output_dev, sizeof(float) * width * height);
    hipMemset(output_dev, 0, sizeof(float) * width * height);

    // Run the Backprojection kernal
    hipLaunchKernelGGL(( cudaBackprojectionKernal), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, dev_sinogram_float,
                                                            output_dev,
                                                            nAngles,
                                                            sinogram_width,
                                                            width);

    // Copy the reconstructed image back to host; hipMemcpy blocks, so the
    // kernel has finished before the data is read
    hipMemcpy(output_host, output_dev, sizeof(float) * width * height, hipMemcpyDeviceToHost);

    // Free the remaining GPU memory
    hipFree(dev_sinogram_float);
    hipFree(output_dev);

    /* Export image data. */
    for(j = 0; j < width; j++){
        for(i = 0; i < height; i++){
            fprintf(outputFile, "%e ",output_host[j*width + i]);
        }
        fprintf(outputFile, "\n");
    }

    /* Cleanup: Free host memory, close files. */
    free(sinogram_host);
    free(output_host);
    fclose(outputFile);

    return 0;
}
| e1ac1dd42c7c5cc7d8d9004d4e2a2199ef41dc56.cu |
/*
Based off work by Nelson, et al.
Brigham Young University (2010)
Adapted by Kevin Yuh (2015)
*/
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cufft.h>
#define PI 3.14159265358979
/* Check errors on CUDA runtime functions */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA runtime call (file/line come from the gpuErrchk macro).
// Bug fix: the `abort` flag was accepted but ignored -- the process always
// exited.  Honor it so callers can opt into report-only behavior.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort)
            exit(code);
    }
}
/* Report any cuFFT status code that is not success. */
void gpuFFTchk(int errval){
    if (errval == CUFFT_SUCCESS)
        return;
    printf("Failed FFT call, error code %d\n", errval);
}
/* Poll the runtime for the most recent launch error and report the outcome
 * (success or failure) on stderr either way. */
void checkCUDAKernelError()
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) {
        fprintf(stderr, "No kernel error detected\n");
    } else {
        fprintf(stderr, "Error %s\n", cudaGetErrorString(status));
    }
}
/* Basic ramp filter: scale each frequency bin linearly with its distance
 * from DC, mirrored about length/2.  In-place over `length` complex bins. */
__global__ void cudaFrequencyKernal(cufftComplex *out_data, int length) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < length; i += stride) {
        // Ramp rises across the first half of the spectrum and falls across
        // the mirrored second half (peak amplitude sits at length/2).
        float gain;
        if (i < (length / 2)) {
            gain = ((float) i) / (length / 2);
        }
        else {
            gain = ((float) (length - i)) / (length / 2);
        }
        cufftComplex bin = out_data[i];
        bin.x = bin.x * gain;
        bin.y = bin.y * gain;
        out_data[i] = bin;
    }
}
/* Copy the real component of each complex element into a float array;
 * the imaginary part is discarded. */
__global__ void cudaComplexToRealKernal(cufftComplex *in_data,
                                        float *out_data,
                                        int length) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < length; i += stride) {
        cufftComplex value = in_data[i];
        out_data[i] = value.x;
    }
}
/* Backproject the sinogram to an image.
 * in_data:  filtered sinogram, nAngles rows of sin_width samples.
 * out_data: square image (image_dim x image_dim); must be pre-zeroed, since
 *           each pixel accumulates one sinogram sample per angle.
 * Threads cover pixels with a grid-stride loop, so any launch config works. */
__global__ void cudaBackprojectionKernal(float *in_data, float *out_data,
                                         int nAngles, int sin_width,
                                         int image_dim) {
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < (image_dim * image_dim)) {
        // Get the pixel (x,y) coordinate from the index value
        int y_image = index / image_dim;
        int x_image = index % image_dim;
        // Get the geometric (x,y) coordinate from the pixel coordinate
        // (origin at the image center, y axis pointing up)
        int x_geo = x_image - (image_dim / 2);
        int y_geo = (image_dim / 2) - y_image;
        // For all theta in the sinogram...
        for (int i = 0; i < nAngles; i++) {
            float d;  // signed distance of the pixel's projection from the detector center
            // Handle the edges cases of theta = 0 and theta = PI/2, where the
            // slope arithmetic below would divide by zero
            if(i == 0) {
                d = (float) x_geo;
            }
            else if (i == nAngles / 2) {
                d = (float) y_geo;
            }
            else {
                float theta = PI * (((float) i) / ((float) nAngles));
                // m: slope of the detector line; the ray through the pixel is
                // perpendicular to it (slope -1/m)
                float m = -1 * cos(theta) / sin(theta);
                // (x_i, y_i): foot of the perpendicular from the pixel onto
                // the detector line through the origin
                float x_i = ((float) (y_geo - m * x_geo)) / ((-1 / m) - m);
                float y_i = (-1 / m) * x_i;
                d = sqrt((x_i * x_i) + (y_i * y_i));
                // Center the index: restore the sign that sqrt discarded
                if (((-1 / m) > 0 && x_i < 0) || ((-1 / m) < 0 && x_i > 0)) {
                    d *= -1;
                }
            }
            // d is the distance from the center line, so we need to offset d by
            // this much to index into the sinogram row
            d += sin_width / 2.0;
            d = truncf(d);
            // Now that we have d, add the right value to the image array
            out_data[y_image * image_dim + x_image] += in_data[i * sin_width + (int)d];
        }
        index += blockDim.x * gridDim.x;
    }
}
/* Driver: read a sinogram, ramp-filter it in the frequency domain with
 * cuFFT, back-project it to an image on the GPU, and write the image.
 * argv: sinogram file, image size, #angles, threads/block, #blocks, output.
 * Bug fix: the cuFFT plan handle was leaked; it is now destroyed after use. */
int main(int argc, char** argv){

    if (argc != 7){
        fprintf(stderr, "Incorrect number of arguments.\n\n");
        fprintf(stderr, "\nArguments: \n \
        < Sinogram filename > \n \
        < Width or height of original image, whichever is larger > \n \
        < Number of angles in sinogram >\n \
        < threads per block >\n \
        < number of blocks >\n \
        < output filename >\n");
        exit(EXIT_FAILURE);
    }

    /********** Parameters **********/

    int width = atoi(argv[2]);
    int height = width;
    // widest possible projection of a width x height image (its diagonal)
    int sinogram_width = (int)ceilf( height * sqrt(2) );

    int nAngles = atoi(argv[3]);

    int threadsPerBlock = atoi(argv[4]);
    int nBlocks = atoi(argv[5]);

    /********** Data storage *********/

    // GPU DATA STORAGE
    cufftComplex *dev_sinogram_cmplx;
    float *dev_sinogram_float;
    float* output_dev;  // Image storage

    cufftComplex *sinogram_host;

    size_t size_result = width*height*sizeof(float);
    float *output_host = (float *)malloc(size_result);

    /*********** Set up IO, Read in data ************/

    sinogram_host = (cufftComplex *)malloc( sinogram_width*nAngles*sizeof(cufftComplex) );

    FILE *dataFile = fopen(argv[1],"r");
    if (dataFile == NULL){
        fprintf(stderr, "Sinogram file missing\n");
        exit(EXIT_FAILURE);
    }

    FILE *outputFile = fopen(argv[6], "w");
    if (outputFile == NULL){
        fprintf(stderr, "Output file cannot be written\n");
        exit(EXIT_FAILURE);
    }

    int j, i;

    // read real-valued samples; imaginary parts start out zero
    for(i = 0; i < nAngles * sinogram_width; i++){
        fscanf(dataFile,"%f",&sinogram_host[i].x);
        sinogram_host[i].y = 0;
    }

    fclose(dataFile);

    /* Allocate GPU storage and copy the input sinogram over. */
    int sinogram_size = nAngles * sinogram_width;
    cudaMalloc((void **) &dev_sinogram_cmplx, sizeof(cufftComplex) * sinogram_size);
    cudaMalloc((void **) &dev_sinogram_float, sizeof(float) * sinogram_size);

    cudaMemcpy(dev_sinogram_cmplx, sinogram_host,
               sizeof(cufftComplex) * sinogram_size,
               cudaMemcpyHostToDevice);

    /* High-pass filter:
        - forward FFT (the whole sinogram as one 1D transform)
        - ramp-scale the frequencies
        - inverse FFT (note: cuFFT's inverse is unnormalized)
        - extract real components to floats
    */
    cufftHandle plan;
    int batch = 1;
    cufftPlan1d(&plan, sinogram_size, CUFFT_C2C, batch);

    // Run the forward DFT
    cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_FORWARD);

    // Apply basic ramp filter
    cudaFrequencyKernal<<<nBlocks, threadsPerBlock>>>
        (dev_sinogram_cmplx, sinogram_size);

    // Run the inverse DFT
    cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_INVERSE);

    // Extract the real components to floats
    cudaComplexToRealKernal<<<nBlocks, threadsPerBlock>>>
        (dev_sinogram_cmplx, dev_sinogram_float, sinogram_size);

    // Destroy the FFT plan (previously leaked), then free the original
    // complex sinogram, which is no longer needed.
    cufftDestroy(plan);
    cudaFree(dev_sinogram_cmplx);

    /* Backprojection:
        - allocate and zero the output image
        - accumulate every angle's contribution per pixel on the GPU
        - copy the reconstructed image back to output_host
        - free all remaining GPU memory
    */
    cudaMalloc((void **) &output_dev, sizeof(float) * width * height);
    cudaMemset(output_dev, 0, sizeof(float) * width * height);

    // Run the Backprojection kernal
    cudaBackprojectionKernal<<<nBlocks, threadsPerBlock>>>(dev_sinogram_float,
                                                           output_dev,
                                                           nAngles,
                                                           sinogram_width,
                                                           width);

    // Copy the reconstructed image back to host; cudaMemcpy blocks, so the
    // kernel has finished before the data is read
    cudaMemcpy(output_host, output_dev, sizeof(float) * width * height, cudaMemcpyDeviceToHost);

    // Free the remaining GPU memory
    cudaFree(dev_sinogram_float);
    cudaFree(output_dev);

    /* Export image data. */
    for(j = 0; j < width; j++){
        for(i = 0; i < height; i++){
            fprintf(outputFile, "%e ",output_host[j*width + i]);
        }
        fprintf(outputFile, "\n");
    }

    /* Cleanup: Free host memory, close files. */
    free(sinogram_host);
    free(output_host);
    fclose(outputFile);

    return 0;
}
|
639d75f0ccdc22f2bbafc95b6b3b18a44f20a579.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SoA component pointers for one 3-vector quantity (x/y/z planes).
struct Point {
    double* x;
    double* y;
    double* z;
};

// Pointers to a single element's position, direction and remaining distance
// inside arrays owned elsewhere.
struct Ref {
    Point pos;
    Point dir;
    double* distance;
};

// Non-owning structure-of-arrays view over `size` elements: pos and dir each
// point at 3*size doubles laid out as [x-plane | y-plane | z-plane].
struct View {
    int size;
    double* pos;
    double* dir;
    double* distance;

    // Build a Ref for element i (caller must ensure 0 <= i < size).
    __device__ Ref operator[](int i) const {
        return {{pos + i, pos + i + size, pos + i + 2*size},
                {dir + i, dir + i + size, dir + i + 2*size},
                distance + i};
    }
};
// Advance one element's position along its direction by its stored distance.
__device__ inline void move_impl(const Ref& r) {
    const double step = *r.distance;
    *r.pos.x += step * (*r.dir.x);
    *r.pos.y += step * (*r.dir.y);
    *r.pos.z += step * (*r.dir.z);
}
// One thread advances one element; threads past view.size do nothing.
__global__ void move(View view) {
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= view.size)
        return;
    move_impl(view[tid]);
}
| 639d75f0ccdc22f2bbafc95b6b3b18a44f20a579.cu | struct Point {
    double* x;  // x-component plane
    double* y;  // y-component plane
    double* z;  // z-component plane
};
// Pointers to a single element's position, direction and remaining distance
// inside arrays owned elsewhere.
struct Ref {
    Point pos;
    Point dir;
    double* distance;
};

// Non-owning structure-of-arrays view over `size` elements: pos and dir each
// point at 3*size doubles laid out as [x-plane | y-plane | z-plane].
struct View {
    int size;
    double* pos;
    double* dir;
    double* distance;

    // Build a Ref for element i (caller must ensure 0 <= i < size).
    __device__ Ref operator[](int i) const {
        return {{pos + i, pos + i + size, pos + i + 2*size},
                {dir + i, dir + i + size, dir + i + 2*size},
                distance + i};
    }
};
// Advance one element's position along its direction by its stored distance.
__device__ inline void move_impl(const Ref& r) {
    const double step = *r.distance;
    *r.pos.x += step * (*r.dir.x);
    *r.pos.y += step * (*r.dir.y);
    *r.pos.z += step * (*r.dir.z);
}
// One thread advances one element; threads past view.size do nothing.
__global__ void move(View view) {
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= view.size)
        return;
    move_impl(view[tid]);
}
|
4ee23cf05340b8283699777b69e102016bf18ed7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "floyd_warshall_algo.cuh"
using namespace std;
// Print a reconstructed path (stored edge-reversed) with each hop's cost,
// plus the total cost looked up in the row-major N x N distance matrix D.
void _showPath(int start,int end,const vector<Piii> &path,const int *D,const int N){
    cout<<"\nHere is the shortest cost path from "<<start<< " to "<<end<<", at a total cost of "<<D[start*N+end]<<".\n";
    // edges were appended in reverse order, so walk the vector back to front
    for(int i=(int)path.size()-1;i>=0;--i){
        const Piii &edge=path[i];
        cout<<"From "<<edge.first.first<<" to "<<edge.first.second<<" at a cost of "<<edge.second<<'\n';
    }
    cout<<'\n';
}
/* Reconstruct the shortest path from curEdge to nxtEdge by walking the
 * predecessor matrix Dpath backwards, appending each traversed edge (and its
 * cost from D) to `path` in reverse order.
 * Returns false when no finite-cost path exists. */
bool _getPath(int curEdge, int nxtEdge,vector<Piii> &path,const int *D, const int *Dpath,const int N){
    int curIdx=curEdge*N+nxtEdge;
    if(D[curIdx]>=INF)return false;  // destination unreachable
    if(Dpath[curIdx]==-1){//end of backwards retracement: a direct edge remains
        path.push_back(make_pair(make_pair(curEdge,nxtEdge),D[curIdx]));
        return true;
    }else{//record last edge cost and move backwards via the intermediate vertex
        path.push_back(make_pair(make_pair(Dpath[curIdx],nxtEdge),D[Dpath[curIdx]*N+nxtEdge]));
        return _getPath(curEdge,Dpath[curIdx],path,D,Dpath,N);
    }
}
/* Demo driver: reconstruct and print the shortest path between a fixed pair
 * of vertices (975 -> 997) and copy the visited source vertices, in forward
 * order, into `roots`.
 * NOTE(review): the vertex pair is hard-coded and `roots` is only written
 * when a path exists -- confirm callers size/initialize it accordingly.
 * The large comment block below is the original interactive variant. */
void _get_full_paths(const int *D, const int *Dpath, const int N, int *roots){
    int start_vertex=-1,end_vertex=-1;
    vector<Piii> path;
    //*
    path.clear();
    start_vertex = 975;
    end_vertex = 997;
    if(_getPath(start_vertex, end_vertex,path,D,Dpath,N)){
        _showPath(start_vertex,end_vertex,path,D,N);
    }else{
        cout<<"\nThere does not exist valid a path between "<<start_vertex<<" , and "<<end_vertex<<'\n';
    }
    // path is stored edge-reversed; iterate it backwards to emit forward order
    for(int j=0,i=path.size()-1;i>=0;++j,--i){
        roots[j] = path[i].first.first;
    }
    //*/
    /*
    do{
        path.clear();
        cout<<"Enter start vertex #:";
        cin>>start_vertex;
        cout<<"Enter dest vertex(enter negative number to exit) #:";
        cin>>end_vertex;
        if(start_vertex<0 || start_vertex>=N || end_vertex<0 || end_vertex>=N)return;
        if(_getPath(start_vertex, end_vertex,path,D,Dpath,N)){
            _showPath(start_vertex,end_vertex,path,D,N);
        }else{
            cout<<"\nThere does not exist valid a path between "<<start_vertex<<" , and "<<end_vertex<<'\n';
        }
    }while(1);
    //*/
}
// Dummy kernel: launched once purely to wake/warm the GPU before timed runs.
__global__ void _Wake_GPU(int reps){
    const int tid=blockDim.x*blockIdx.x+threadIdx.x;
    if(tid>=reps)return;
}
/* One Floyd-Warshall relaxation step through intermediate vertex k.
 * Each block row (blockIdx.y) handles one source vertex, each thread one
 * destination column; G (distances) and P (predecessors) are N x N,
 * row-major, updated in place. */
__global__ void _GPU_Floyd_kernel(int k, int *G,int *P, int N){//G will be the adjacency matrix, P will be path matrix
    int col=blockIdx.x*blockDim.x + threadIdx.x;
    if(col>=N)return;  // exited threads do not participate in the barrier below
    int idx=N*blockIdx.y+col;

    // d(row,k) is identical for every thread of this block, so one thread
    // loads it into shared memory and the barrier broadcasts it.
    __shared__ int best;
    if(threadIdx.x==0)
        best=G[N*blockIdx.y+k];
    __syncthreads();
    // `best` is uniform across the block, so all remaining threads take this
    // early return together; no further barrier follows it.
    if(best==INF)return;
    int tmp_b=G[k*N+col];
    if(tmp_b==INF)return;
    // relax: path row->k->col versus current row->col
    int cur=best+tmp_b;
    if(cur<G[idx]){
        G[idx]=cur;
        P[idx]=k;
    }
}
/* Host driver for GPU Floyd-Warshall: copies the N x N distance (H_G) and
 * predecessor (H_Gpath) matrices to the device, runs one kernel sweep per
 * intermediate vertex k, then copies both matrices back.  Every runtime
 * call's status is printed on failure (execution continues regardless). */
void _GPU_Floyd(int *H_G, int *H_Gpath, const int N){
    //allocate device memory and copy graph data from host
    int *dG,*dP;
    int numBytes=N*N*sizeof(int);
    hipError_t err=hipMalloc((int **)&dG,numBytes);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    err=hipMalloc((int **)&dP,numBytes);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    //copy from host to device graph info
    err=hipMemcpy(dG,H_G,numBytes,hipMemcpyHostToDevice);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    err=hipMemcpy(dP,H_Gpath,numBytes,hipMemcpyHostToDevice);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    // one block row per source vertex, columns tiled by BLOCK_SIZE
    dim3 dimGrid((N+BLOCK_SIZE-1)/BLOCK_SIZE,N);
    for(int k=0;k<N;k++){//main loop: relax all pairs through vertex k
        hipLaunchKernelGGL(( _GPU_Floyd_kernel), dim3(dimGrid),dim3(BLOCK_SIZE), 0, 0, k,dG,dP,N);
        // per-iteration sync surfaces kernel execution errors immediately;
        // launches in one stream already serialize, so ordering is safe anyway
        err = hipDeviceSynchronize();
        if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    }
    //copy back memory
    err=hipMemcpy(H_G,dG,numBytes,hipMemcpyDeviceToHost);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    err=hipMemcpy(H_Gpath,dP,numBytes,hipMemcpyDeviceToHost);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    //free device memory
    err=hipFree(dG);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
    err=hipFree(dP);
    if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
}
// Load an N x N adjacency matrix (row-major, whitespace-separated ints)
// from "data.txt" into G.
// Bug fixes: the element count was hard-coded to 2016*2016 regardless of N
// (overrunning G for smaller graphs), and a missing input file was still
// handed to fscanf/fclose, crashing the program.
void _generateCustomGraph(int *G, int N){
    FILE *dataFile = fopen("data.txt", "r");
    if(dataFile != NULL){
        std::cout<<"Successfully opened file.\n";
    } else {
        std::cout<<"File not found.\n";
        return;  // nothing to read; leave G untouched
    }
    for(int i=0; i < N*N; i++){
        if(fscanf(dataFile, "%d", &G[i]) != 1){
            break;  // short or malformed input: stop instead of reading garbage
        }
    }
    fclose(dataFile);
}
| 4ee23cf05340b8283699777b69e102016bf18ed7.cu | #include "floyd_warshall_algo.cuh"
using namespace std;
// Print a reconstructed path (stored edge-reversed) with each hop's cost,
// plus the total cost looked up in the row-major N x N distance matrix D.
void _showPath(int start,int end,const vector<Piii> &path,const int *D,const int N){
    cout<<"\nHere is the shortest cost path from "<<start<< " to "<<end<<", at a total cost of "<<D[start*N+end]<<".\n";
    // edges were appended in reverse order, so walk the vector back to front
    for(int i=(int)path.size()-1;i>=0;--i){
        const Piii &edge=path[i];
        cout<<"From "<<edge.first.first<<" to "<<edge.first.second<<" at a cost of "<<edge.second<<'\n';
    }
    cout<<'\n';
}
/* Reconstruct the shortest path from curEdge to nxtEdge by walking the
 * predecessor matrix Dpath backwards, appending each traversed edge (and its
 * cost from D) to `path` in reverse order.
 * Returns false when no finite-cost path exists. */
bool _getPath(int curEdge, int nxtEdge,vector<Piii> &path,const int *D, const int *Dpath,const int N){
    int curIdx=curEdge*N+nxtEdge;
    if(D[curIdx]>=INF)return false;  // destination unreachable
    if(Dpath[curIdx]==-1){//end of backwards retracement: a direct edge remains
        path.push_back(make_pair(make_pair(curEdge,nxtEdge),D[curIdx]));
        return true;
    }else{//record last edge cost and move backwards via the intermediate vertex
        path.push_back(make_pair(make_pair(Dpath[curIdx],nxtEdge),D[Dpath[curIdx]*N+nxtEdge]));
        return _getPath(curEdge,Dpath[curIdx],path,D,Dpath,N);
    }
}
/* Demo driver: reconstruct and print the shortest path between a fixed pair
 * of vertices (975 -> 997) and copy the visited source vertices, in forward
 * order, into `roots`.
 * NOTE(review): the vertex pair is hard-coded and `roots` is only written
 * when a path exists -- confirm callers size/initialize it accordingly.
 * The large comment block below is the original interactive variant. */
void _get_full_paths(const int *D, const int *Dpath, const int N, int *roots){
    int start_vertex=-1,end_vertex=-1;
    vector<Piii> path;
    //*
    path.clear();
    start_vertex = 975;
    end_vertex = 997;
    if(_getPath(start_vertex, end_vertex,path,D,Dpath,N)){
        _showPath(start_vertex,end_vertex,path,D,N);
    }else{
        cout<<"\nThere does not exist valid a path between "<<start_vertex<<" , and "<<end_vertex<<'\n';
    }
    // path is stored edge-reversed; iterate it backwards to emit forward order
    for(int j=0,i=path.size()-1;i>=0;++j,--i){
        roots[j] = path[i].first.first;
    }
    //*/
    /*
    do{
        path.clear();
        cout<<"Enter start vertex #:";
        cin>>start_vertex;
        cout<<"Enter dest vertex(enter negative number to exit) #:";
        cin>>end_vertex;
        if(start_vertex<0 || start_vertex>=N || end_vertex<0 || end_vertex>=N)return;
        if(_getPath(start_vertex, end_vertex,path,D,Dpath,N)){
            _showPath(start_vertex,end_vertex,path,D,N);
        }else{
            cout<<"\nThere does not exist valid a path between "<<start_vertex<<" , and "<<end_vertex<<'\n';
        }
    }while(1);
    //*/
}
// Dummy kernel: launched once purely to wake/warm the GPU before timed runs.
__global__ void _Wake_GPU(int reps){
    const int tid=blockDim.x*blockIdx.x+threadIdx.x;
    if(tid>=reps)return;
}
/* One Floyd-Warshall relaxation step through intermediate vertex k.
 * Each block row (blockIdx.y) handles one source vertex, each thread one
 * destination column; G (distances) and P (predecessors) are N x N,
 * row-major, updated in place. */
__global__ void _GPU_Floyd_kernel(int k, int *G,int *P, int N){//G will be the adjacency matrix, P will be path matrix
    int col=blockIdx.x*blockDim.x + threadIdx.x;
    if(col>=N)return;  // exited threads do not participate in the barrier below
    int idx=N*blockIdx.y+col;

    // d(row,k) is identical for every thread of this block, so one thread
    // loads it into shared memory and the barrier broadcasts it.
    __shared__ int best;
    if(threadIdx.x==0)
        best=G[N*blockIdx.y+k];
    __syncthreads();
    // `best` is uniform across the block, so all remaining threads take this
    // early return together; no further barrier follows it.
    if(best==INF)return;
    int tmp_b=G[k*N+col];
    if(tmp_b==INF)return;
    // relax: path row->k->col versus current row->col
    int cur=best+tmp_b;
    if(cur<G[idx]){
        G[idx]=cur;
        P[idx]=k;
    }
}
/* Host driver for GPU Floyd-Warshall: copies the N x N distance (H_G) and
 * predecessor (H_Gpath) matrices to the device, runs one kernel sweep per
 * intermediate vertex k, then copies both matrices back.  Every runtime
 * call's status is printed on failure (execution continues regardless).
 * Bug fix: cudaThreadSynchronize() is deprecated (and removed in recent
 * toolkits); replaced with the equivalent cudaDeviceSynchronize(). */
void _GPU_Floyd(int *H_G, int *H_Gpath, const int N){
    //allocate device memory and copy graph data from host
    int *dG,*dP;
    int numBytes=N*N*sizeof(int);
    cudaError_t err=cudaMalloc((int **)&dG,numBytes);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaMalloc((int **)&dP,numBytes);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    //copy from host to device graph info
    err=cudaMemcpy(dG,H_G,numBytes,cudaMemcpyHostToDevice);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaMemcpy(dP,H_Gpath,numBytes,cudaMemcpyHostToDevice);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    // one block row per source vertex, columns tiled by BLOCK_SIZE
    dim3 dimGrid((N+BLOCK_SIZE-1)/BLOCK_SIZE,N);
    for(int k=0;k<N;k++){//main loop: relax all pairs through vertex k
        _GPU_Floyd_kernel<<<dimGrid,BLOCK_SIZE>>>(k,dG,dP,N);
        // per-iteration sync surfaces kernel execution errors immediately;
        // launches in one stream already serialize, so ordering is safe anyway
        err = cudaDeviceSynchronize();
        if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    }
    //copy back memory
    err=cudaMemcpy(H_G,dG,numBytes,cudaMemcpyDeviceToHost);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaMemcpy(H_Gpath,dP,numBytes,cudaMemcpyDeviceToHost);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    //free device memory
    err=cudaFree(dG);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaFree(dP);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
}
// Load an N x N adjacency matrix (row-major, whitespace-separated ints)
// from "data.txt" into G.
// Bug fixes: the element count was hard-coded to 2016*2016 regardless of N
// (overrunning G for smaller graphs), and a missing input file was still
// handed to fscanf/fclose, crashing the program.
void _generateCustomGraph(int *G, int N){
    FILE *dataFile = fopen("data.txt", "r");
    if(dataFile != NULL){
        std::cout<<"Successfully opened file.\n";
    } else {
        std::cout<<"File not found.\n";
        return;  // nothing to read; leave G untouched
    }
    for(int i=0; i < N*N; i++){
        if(fscanf(dataFile, "%d", &G[i]) != 1){
            break;  // short or malformed input: stop instead of reading garbage
        }
    }
    fclose(dataFile);
}
|
2a8d99cfd45aa868f7e014c29ac2efc5f7dba872.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2000-2021, Heiko Bauke
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
#include <iostream>
#include <vector>
#include <trng/yarn2.hpp>
#include <trng/uniform01_dist.hpp>
// Monte Carlo sampler: every thread draws its share of `samples` random
// (x, y) points from [0, 1)^2 and counts how many fall inside the unit
// circle. Expects a single-block launch; in[] holds one counter per thread.
__global__ void parallel_pi(long samples, trng::yarn2 *rx, trng::yarn2 *ry, long *in) {
const long tid = threadIdx.x;
const long nthreads = blockDim.x;
trng::uniform01_dist<float> dist; // uniform distribution on [0, 1)
// Partition [0, samples) contiguously across threads; integer division makes
// the shares slightly uneven, but the whole range is covered exactly once.
const long begin = tid * samples / nthreads;
const long end = (tid + 1) * samples / nthreads;
long hits = 0; // thread-local count of points inside the circle
for (long i = begin; i != end; ++i) {
const float x = dist(rx[tid]);
const float y = dist(ry[tid]);
if (x * x + y * y <= 1)
++hits;
}
in[tid] = hits;
}
int main(int argc, char *argv[]) {
const long samples{1000000l}; // total number of points in square
const int size{128}; // number of threads
trng::yarn2 *rx{new trng::yarn2[size]}; // random number engines
trng::yarn2 *ry{new trng::yarn2[size]}; // random number engines
for (int rank{0}; rank < size; ++rank) {
rx[rank].split(2, 0); // choose sub-stream no. 0 out of 2 streams
ry[rank].split(2, 1); // choose sub-stream no. 1 out of 2 streams
rx[rank].split(size, rank); // choose sub-stream no. rank out of size streams
ry[rank].split(size, rank); // choose sub-stream no. rank out of size streams
}
// copy random number engines to CUDA device
trng::yarn2 *rx_device, *ry_device;
hipMalloc(&rx_device, size * sizeof(*rx_device));
hipMalloc(&ry_device, size * sizeof(*ry_device));
hipMemcpy(rx_device, rx, size * sizeof(*rx), hipMemcpyHostToDevice);
hipMemcpy(ry_device, ry, size * sizeof(*ry), hipMemcpyHostToDevice);
// memory for thread local results
long *in_device;
hipMalloc(&in_device, size * sizeof(*in_device));
// start parallel Monte Carlo
hipLaunchKernelGGL(( parallel_pi), dim3(1), dim3(size), 0, 0, samples, rx_device, ry_device, in_device);
// gather results
std::vector<long> in(size);
hipMemcpy(in.data(), in_device, size * sizeof(*in), hipMemcpyDeviceToHost);
hipFree(rx_device);
hipFree(ry_device);
long sum{0};
for (int rank{0}; rank < size; ++rank)
sum += in[rank];
// print result
std::cout << "pi = " << 4.0 * sum / samples << std::endl;
return EXIT_SUCCESS;
}
| 2a8d99cfd45aa868f7e014c29ac2efc5f7dba872.cu | // Copyright (c) 2000-2021, Heiko Bauke
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
#include <iostream>
#include <vector>
#include <trng/yarn2.hpp>
#include <trng/uniform01_dist.hpp>
// Monte Carlo sampler: every thread draws its share of `samples` random
// (x, y) points from [0, 1)^2 and counts how many fall inside the unit
// circle. Expects a single-block launch; in[] holds one counter per thread.
__global__ void parallel_pi(long samples, trng::yarn2 *rx, trng::yarn2 *ry, long *in) {
const long tid = threadIdx.x;
const long nthreads = blockDim.x;
trng::uniform01_dist<float> dist; // uniform distribution on [0, 1)
// Partition [0, samples) contiguously across threads; integer division makes
// the shares slightly uneven, but the whole range is covered exactly once.
const long begin = tid * samples / nthreads;
const long end = (tid + 1) * samples / nthreads;
long hits = 0; // thread-local count of points inside the circle
for (long i = begin; i != end; ++i) {
const float x = dist(rx[tid]);
const float y = dist(ry[tid]);
if (x * x + y * y <= 1)
++hits;
}
in[tid] = hits;
}
// Host driver: split per-thread trng streams, run the Monte Carlo kernel on
// the device, reduce the per-thread counters on the host and print pi.
// BUG FIX: in_device, rx and ry were leaked; they are now released.
int main(int argc, char *argv[]) {
const long samples{1000000l}; // total number of points in square
const int size{128}; // number of threads
trng::yarn2 *rx{new trng::yarn2[size]}; // random number engines
trng::yarn2 *ry{new trng::yarn2[size]}; // random number engines
for (int rank{0}; rank < size; ++rank) {
rx[rank].split(2, 0); // choose sub-stream no. 0 out of 2 streams
ry[rank].split(2, 1); // choose sub-stream no. 1 out of 2 streams
rx[rank].split(size, rank); // choose sub-stream no. rank out of size streams
ry[rank].split(size, rank); // choose sub-stream no. rank out of size streams
}
// copy random number engines to CUDA device
trng::yarn2 *rx_device, *ry_device;
cudaMalloc(&rx_device, size * sizeof(*rx_device));
cudaMalloc(&ry_device, size * sizeof(*ry_device));
cudaMemcpy(rx_device, rx, size * sizeof(*rx), cudaMemcpyHostToDevice);
cudaMemcpy(ry_device, ry, size * sizeof(*ry), cudaMemcpyHostToDevice);
// memory for thread local results
long *in_device;
cudaMalloc(&in_device, size * sizeof(*in_device));
// start parallel Monte Carlo: one block of `size` threads
parallel_pi<<<1, size>>>(samples, rx_device, ry_device, in_device);
// gather results (the blocking copy also synchronizes with the kernel)
std::vector<long> in(size);
cudaMemcpy(in.data(), in_device, size * sizeof(*in), cudaMemcpyDeviceToHost);
// release device and host resources
cudaFree(rx_device);
cudaFree(ry_device);
cudaFree(in_device); // BUG FIX: was never freed
delete[] rx; // BUG FIX: was never freed
delete[] ry; // BUG FIX: was never freed
long sum{0};
for (int rank{0}; rank < size; ++rank)
sum += in[rank];
// print result
std::cout << "pi = " << 4.0 * sum / samples << std::endl;
return EXIT_SUCCESS;
}
|
ce854035cde477858fd7948a86c4c245bbbe29a6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copyToOpenMM.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// (BLOCKX, BLOCKY) launch configurations to sweep in the benchmark below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// (XSIZE, YSIZE) matrix shapes to benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark harness: for each matrix shape and each launch configuration,
// time 1000 back-to-back launches of copyToOpenMM and print the result.
// NOTE(review): the timed loop is not followed by a synchronize, so it
// appears to measure launch overhead rather than kernel runtime — confirm
// this is intentional before changing.
int main(int argc, char **argv) {
hipSetDevice(0);
// number of matrix shapes to benchmark, taken from the command line
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// BUG FIX: allocations were XSIZE*YSIZE bytes, but the kernel indexes
// N = XSIZE*YSIZE floats (4x under-allocation). Allocate in bytes.
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE*sizeof(float));
float *source = NULL;
hipMalloc(&source, XSIZE*YSIZE*sizeof(float));
int N = XSIZE*YSIZE;
// round the grid up so it covers the whole matrix
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0); // establish the device context before timing
// warm-up launch
hipLaunchKernelGGL(copyToOpenMM, gridBlock, threadBlock, 0, 0, target, source, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(copyToOpenMM, gridBlock, threadBlock, 0, 0, target, source, N);
}
// timed section: 1000 back-to-back launches
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(copyToOpenMM, gridBlock, threadBlock, 0, 0, target, source, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUG FIX: buffers were leaked on every iteration
hipFree(target);
hipFree(source);
}
}} | ce854035cde477858fd7948a86c4c245bbbe29a6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copyToOpenMM.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// (BLOCKX, BLOCKY) launch configurations to sweep in the benchmark below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// (XSIZE, YSIZE) matrix shapes to benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark harness: for each matrix shape and each launch configuration,
// time 1000 back-to-back launches of copyToOpenMM and print the result.
// NOTE(review): the timed loop is not followed by a synchronize, so it
// appears to measure launch overhead rather than kernel runtime — confirm
// this is intentional before changing.
int main(int argc, char **argv) {
cudaSetDevice(0);
// number of matrix shapes to benchmark, taken from the command line
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// BUG FIX: allocations were XSIZE*YSIZE bytes, but the kernel indexes
// N = XSIZE*YSIZE floats (4x under-allocation). Allocate in bytes.
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE*sizeof(float));
float *source = NULL;
cudaMalloc(&source, XSIZE*YSIZE*sizeof(float));
int N = XSIZE*YSIZE;
// round the grid up so it covers the whole matrix
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // establish the device context before timing
// warm-up launch
copyToOpenMM<<<gridBlock,threadBlock>>>(target,source,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copyToOpenMM<<<gridBlock,threadBlock>>>(target,source,N);
}
// timed section: 1000 back-to-back launches
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copyToOpenMM<<<gridBlock,threadBlock>>>(target,source,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUG FIX: buffers were leaked on every iteration
cudaFree(target);
cudaFree(source);
}
}} |
13ce1686543ac18a72d172dbf9b268e3799e74db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** ------------------------------------------------------------------------ **/
/** MOMENTUM WITH EFFECTIVE VISCOSITY **/
/** ------------------------------------------------------------------------ **/
#include "schumann.h"
/*---------------------------------------------------------------------------*/
/*----------------------- Instantaneous Y momentum -----------------------*/
/*---------------------------------------------------------------------------*/
__global__ void momentum_schumann_y(int const sections, int const time_method, const REAL *d_u,
const REAL *d_v, const REAL *d_w, REAL *d_vnew, REAL *d_vt1,
REAL *d_vt2, const REAL *d_nu, const REAL *d_nu_rans,
const REAL *d_tauw21w, const REAL *d_tauw21e,
const REAL *d_tauw23b, const REAL *d_tauw23t, const REAL *d_phi,
const REAL *d_Tinflow, const REAL *d_df,
const REAL *d_forcing_y, REAL *d_tauw23bst, REAL *d_tauw23tst)
{
// NOTE(review): the flat index below assumes gridDim.x * blockDim.x == NX so
// that I walks an x-fastest layout; the +NXNY offset appears to skip one
// ghost/halo z-layer — TODO confirm against the launch configuration.
REAL dt = DCONSTANT_DT;
// REAL H = DCONSTANT_TURB_TRANS;
int const nlayers = DCONSTANT_NLAYERS;
unsigned int xpos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int ypos = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int I = ypos * (gridDim.x * blockDim.x) + xpos + NXNY;
if ((xpos == 0) || (xpos >= (DCONSTANT_NX - 1)) || (ypos == 0) || (ypos >= (DCONSTANT_NY - 1))
|| (sections == 0))
return;
unsigned int kbeg, kend;
if (sections & SECTION_BOT) {
kbeg = 0;
} else {
kbeg = 1;
}
if (sections & SECTION_TOP) {
kend = nlayers;
} else {
kend = nlayers - 1;
}
bool do_mid = (sections & SECTION_MID);
int k = kbeg;
#ifndef VELOCITY_NUDGING
REAL vproj;
#endif // VELOCITY_NUDGING
// REAL Dc = 0.000001;
// REAL Dn = 0.000001;
// REAL De = 0.000001;
// REAL Dne = 0.000001;
// REAL Dw = 0.000001;
// REAL Dnw = 0.000001;
// REAL Dt = 0.000001;
// REAL Dtn = 0.000001;
// REAL Db = 0.000001;
// REAL Dbn = 0.000001;
REAL vip = NU; // visc. plus one index
REAL vic = NU; // visc. at c
REAL vxe = NU; // visc. x-dir east
REAL vxw = NU; // visc. x-dir west
REAL vzt = NU; // visc. z-dir top
REAL vzb = NU; // visc. z-dir bottom
// Zero out storage
d_tauw23bst[ I - NXNY ] = 0.0;
d_tauw23tst[ I - NXNY ] = 0.0;
#ifdef TEMP_TURB_INLET_PARTIAL_COUPLING
int tinflow_check_xdir = (xpos > DCONSTANT_PERTURB_GCMIN_X && xpos < DCONSTANT_PERTURB_GCMAX_X
&& ypos > DCONSTANT_PERTURB_GCMIN_Y);
int tinflow_check_ydir = (ypos > DCONSTANT_PERTURB_GCMIN_Y && ypos < DCONSTANT_PERTURB_GCMAX_Y
&& xpos > DCONSTANT_PERTURB_GCMIN_X);
int tinflow_check = (d_Tinflow != 0) && ((tinflow_check_xdir || tinflow_check_ydir));
#else
int tinflow_check = (d_Tinflow != 0);
#endif // TEMP_TURB_INLET_PARTIAL_COUPLING
int eddyvisc_check
= (xpos < DCONSTANT_PERTURB_EDDYVIS_MAX_X || ypos < DCONSTANT_PERTURB_EDDYVIS_MAX_Y);
#ifdef TEMPERATURE_SOLUTION
REAL t_at_v = DCONSTANT_TEMP_ISOTH;
#endif // TEMPERATURE_SOLUTION
#ifdef TEMP_TURB_INLET
REAL ti_at_v = DCONSTANT_TEMP_ISOTH;
#endif // TEMP_TURB_INLET
// Sweep the z-layers selected by `sections`; when SECTION_MID is not
// requested the loop handles k = kbeg and then jumps straight to
// k = nlayers - 1 (see the bottom of the loop body).
while (k < kend) {
unsigned int base = I + k * NXNY;
unsigned int north = base + NX;
REAL z = (REAL)(k + DCONSTANT_ZFIRST) * DCONSTANT_DZ;
// Shear stresses
REAL tauw23b = d_tauw23b[ I - NXNY ]; // bottom wall
REAL tauw23t = d_tauw23t[ I - NXNY ]; // top wall
REAL tauw21w = d_tauw21w[ k * NY + ypos ]; // west wall
REAL tauw21e = d_tauw21e[ k * NY + ypos ]; // east wall
REAL diff, adv;
REAL boussinesq_approx = 0.0;
REAL uc = d_u[ base ];
REAL uw = d_u[ base - 1 ];
REAL un = d_u[ north ];
REAL unw = d_u[ base + NX - 1 ];
REAL vc = d_v[ base ];
REAL ve = d_v[ base + 1 ];
REAL vw = d_v[ base - 1 ];
REAL vn = d_v[ north ];
REAL vs = d_v[ base - NX ];
REAL vt = d_v[ base + NXNY ];
REAL vb = d_v[ base - NXNY ];
REAL wc = d_w[ base ];
REAL wn = d_w[ base + NX ];
REAL wb = d_w[ base - NXNY ];
REAL wbn = d_w[ base - NXNY + NX ];
// Average the turbulent viscosities around the v velocity face
if (d_nu != 0) {
// if ( d_nu_rans != 0 )
//{
// Dc = d_df[base]; // The allocation logic should guarantee that d_df is allocated
// if hybrid RANS/LES is chosen Dn = d_df[north]; De = d_df[base+1]; Dne =
// d_df[base+NX+1]; Dw = d_df[base-1]; Dnw = d_df[base+NX-1]; Dt = d_df[base+NXNY];
// Dtn = d_df[base+NXNY+NX];
// Db = d_df[base-NXNY];
// Dbn = d_df[base-NXNY+NX];
// vip = NU + d_nu_rans[north] * ( Dn <= H ) + d_nu[north] * ( Dn > H );
// vic = NU + d_nu_rans[base] * ( Dc <= H ) + d_nu[base] * ( Dc > H );
// if ( ( Dc > H ) || ( Dn > H ) || ( Dne > H ) || ( De > H ) ) {
// vxe = NU + 0.25*(d_nu[base]+ d_nu[north] + d_nu[base+NX+1] + d_nu[base+1]);
// } else {
// vxe = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base+NX+1] +
// d_nu_rans[base+1]);
// }
// if ( ( Dc > H ) || ( Dn > H ) || ( Dnw > H ) || ( Dw > H ) ) {
// vxw = NU + 0.25*(d_nu[base] + d_nu[north] + d_nu[base+NX-1] + d_nu[base-1]);
// } else {
// vxw = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base+NX-1] +
// d_nu_rans[base-1]);
// }
// if ( ( Dc > H ) || ( Dn > H ) || ( Dtn > H ) || ( Dt > H ) ) {
// vzt = NU + 0.25*(d_nu[base] + d_nu[north] + d_nu[base+NXNY+NX] +
// d_nu[base+NXNY]);
// } else {
// vzt = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base+NXNY+NX] +
// d_nu_rans[base+NXNY]);
// }
// if ( ( Dc > H ) || ( Dn > H ) || ( Dbn > H ) || ( Db > H ) ) {
// vzb = NU + 0.25*(d_nu[base] + d_nu[north] + d_nu[base-NXNY+NX] +
// d_nu[base-NXNY]);
// } else {
// vzb = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base-NXNY+NX] +
// d_nu_rans[base-NXNY]);
// }
//} else
//{
// Eliminate eddy viscosity in perturbation zone
REAL nu_perturb = (1.0 - eddyvisc_check);
vic = nu_perturb * d_nu[ base ]; // use to reduce global memory accesses
vip = nu_perturb * d_nu[ north ]; // use to reduce global memory accesses
vxe = NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base + NX + 1 ] + d_nu[ base + 1 ]);
vxw = NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base + NX - 1 ] + d_nu[ base - 1 ]);
vzt
= NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base + NXNY + NX ] + d_nu[ base + NXNY ]);
vzb
= NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base - NXNY + NX ] + d_nu[ base - NXNY ]);
vic += NU; // Add molecular viscosity
vip += NU; // Add molecular viscosity
//}
}
#ifdef TEMP_TURB_INLET
// Apply boussinesq approximation for turbulent inflow temperature bouyancy effects
if (tinflow_check) {
// average the temperature about the w velocity point
ti_at_v = 0.5 * (d_Tinflow[ base ] + d_Tinflow[ base + NX ]);
}
boussinesq_approx += boussinesq_bouyancy(DCONSTANT_GRAVITY_Y, ti_at_v);
#endif // TEMP_TURB_INLET
// Calculate shear terms
REAL tau21p = vxe * ((ve - vc) * dxi + (un - uc) * dyi);
REAL tau21m = vxw * ((vc - vw) * dxi + (unw - uw) * dyi);
REAL tau22p = 2.0 * vip * (vn - vc) * dyi;
REAL tau22m = 2.0 * vic * (vc - vs) * dyi;
REAL tau23p = vzt * ((vt - vc) * dzi + (wn - wc) * dyi);
REAL tau23m = vzb * ((vc - vb) * dzi + (wbn - wb) * dyi);
// Apply instantaneous shear BC
// u-component averaged to v-component
REAL uav = 0.25 * (uc + uw + un + unw);
// w-component averaged to v-component
REAL wav = 0.25 * (wc + wb + wn + wbn);
// top/bottom wall-parallel velocity magnitude
REAL magxy = sqrt(uav * uav + vc * vc);
// east/west wall-parallel velocity magnitude
REAL magyz = sqrt(wav * wav + vc * vc);
// Avoid division by zero if velocities are zero
magxy = (magxy < MACHEPS) + (magxy > MACHEPS) * magxy;
magyz = (magyz < MACHEPS) + (magyz > MACHEPS) * magyz;
REAL temp;
// west wall
temp
= (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_W) == BOUNDARY_INSTASHEAR) && (xpos == 1));
tau21m = (1.0 - temp) * tau21m + temp * (tauw21w * (vc / magyz));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw21w = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw21w, tauw21w * ( vc / magyz ));
// east wall
temp = (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_E) == BOUNDARY_INSTASHEAR)
&& (xpos == (DCONSTANT_NX - 2)));
tau21p = (1.0 - temp) * tau21p - temp * (tauw21e * (vc / magyz));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw21e = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw21e, tauw21e * ( vc / magyz ));
// bottom wall
temp = (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_B) == BOUNDARY_INSTASHEAR)
&& (DCONSTANT_DEVICE == 0) && (k == 1));
tau23m = (1.0 - temp) * tau23m + temp * (tauw23b * (vc / magxy));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw23b = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw23b, tauw23b * ( vc / magxy ));
// Only add tau23m when we are on the correct indices
d_tauw23bst[ I - NXNY ] += tau23m * temp;
// top wall
temp = (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_T) == BOUNDARY_INSTASHEAR)
&& (DCONSTANT_DEVICE == (DCONSTANT_GPUCOUNT - 1)) && (k == (nlayers - 2)));
tau23p = (1.0 - temp) * tau23p - temp * (tauw23t * (vc / magxy));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw23t = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw23t,tauw23t * ( vc / magxy ) );
// Only add tau23p when we are on the correct indices
d_tauw23tst[ I - NXNY ] += tau23p * temp;
// Calculate viscous diffusion term
diff = dxi * (tau21p - tau21m) + dyi * (tau22p - tau22m) + dzi * (tau23p - tau23m);
// Calculate convection term
adv = 0.25
* (dxi * ((uc + un) * (vc + ve) - (uw + unw) * (vw + vc))
+ dyi * ((vc + vn) * (vc + vn) - (vs + vc) * (vs + vc))
+ dzi * ((wc + wn) * (vc + vt) - (wb + wbn) * (vb + vc)));
// apply upwind scheme if desired
if (UPWIND > 0.0001) {
adv += UPWIND * 0.25
* (dxi * ((fabs(uc + un) * (vc - ve)) - (fabs(uw + unw) * (vw - vc)))
+ dyi * ((fabs(vc + vn) * (vc - vn)) - (fabs(vs + vc) * (vs - vc)))
+ dzi * ((fabs(wc + wn) * (vc - vt)) - (fabs(wb + wbn) * (vb - vc))));
}
#ifdef TEMPERATURE_SOLUTION
// Apply boussinesq approximation for physical temperature bouyancy effects
if (d_phi != 0) {
// average the temperature about the w velocity point
t_at_v = 0.5 * (d_phi[ base ] + d_phi[ base + NX ]);
boussinesq_approx += boussinesq_bouyancy(DCONSTANT_GRAVITY_Y, t_at_v);
}
#endif // TEMPERATURE_SOLUTION
// REAL coriolis =-DCONSTANT_EKMAN_ANGULARVELOCITY*(uc-DCONSTANT_GEOSTROPHIC_WIND);
// printf("coriolis=%f,geostrophic=%f\n",DCONSTANT_EKMAN_ANGULARVELOCITY,
// DCONSTANT_GEOSTROPHIC_WIND);
// Rayleigh damping: inactive below z = 0.5*LZ, quintic ramp up to 0.5
// between 0.5*LZ and 0.9*LZ, constant 0.5 above.
REAL start = 0.50;
REAL end = 0.90;
REAL thickness = end - start;
REAL damping_factor = 0.5 * (z > start * DCONSTANT_LZ) * (z < end * DCONSTANT_LZ)
* pow((z - start * DCONSTANT_LZ) / (thickness * DCONSTANT_LZ), 5)
+ 0.5 * (z > end * DCONSTANT_LZ);
REAL rayleigh_damping = -damping_factor * vc;
#ifdef VELOCITY_NUDGING
// Advance the time step
PERFORM_TIME_STEP(time_method, d_vt2[ base ], d_vt1[ base ], d_vnew[ base ], vc,
(diff - adv + boussinesq_approx + rayleigh_damping));
#else // VELOCITY_NUDGING
// Advance the time step
PERFORM_TIME_STEP(time_method, d_vt2[ base ], d_vt1[ base ], vproj, vc,
(diff - adv + boussinesq_approx + rayleigh_damping));
// Apply forcing
if (d_forcing_y != 0) vproj += dt * d_forcing_y[ base ];
d_vnew[ base ] = vproj;
#endif // VELOCITY_NUDGING
if ((!do_mid) && (k == kbeg)) {
k = nlayers - 1;
} else {
k++;
}
}
}
| 13ce1686543ac18a72d172dbf9b268e3799e74db.cu | /** ------------------------------------------------------------------------ **/
/** MOMENTUM WITH EFFECTIVE VISCOSITY **/
/** ------------------------------------------------------------------------ **/
#include "schumann.h"
/*---------------------------------------------------------------------------*/
/*----------------------- Instantaneous Y momentum -----------------------*/
/*---------------------------------------------------------------------------*/
__global__ void momentum_schumann_y(int const sections, int const time_method, const REAL *d_u,
const REAL *d_v, const REAL *d_w, REAL *d_vnew, REAL *d_vt1,
REAL *d_vt2, const REAL *d_nu, const REAL *d_nu_rans,
const REAL *d_tauw21w, const REAL *d_tauw21e,
const REAL *d_tauw23b, const REAL *d_tauw23t, const REAL *d_phi,
const REAL *d_Tinflow, const REAL *d_df,
const REAL *d_forcing_y, REAL *d_tauw23bst, REAL *d_tauw23tst)
{
// NOTE(review): the flat index below assumes gridDim.x * blockDim.x == NX so
// that I walks an x-fastest layout; the +NXNY offset appears to skip one
// ghost/halo z-layer — TODO confirm against the launch configuration.
REAL dt = DCONSTANT_DT;
// REAL H = DCONSTANT_TURB_TRANS;
int const nlayers = DCONSTANT_NLAYERS;
unsigned int xpos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int ypos = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int I = ypos * (gridDim.x * blockDim.x) + xpos + NXNY;
if ((xpos == 0) || (xpos >= (DCONSTANT_NX - 1)) || (ypos == 0) || (ypos >= (DCONSTANT_NY - 1))
|| (sections == 0))
return;
unsigned int kbeg, kend;
if (sections & SECTION_BOT) {
kbeg = 0;
} else {
kbeg = 1;
}
if (sections & SECTION_TOP) {
kend = nlayers;
} else {
kend = nlayers - 1;
}
bool do_mid = (sections & SECTION_MID);
int k = kbeg;
#ifndef VELOCITY_NUDGING
REAL vproj;
#endif // VELOCITY_NUDGING
// REAL Dc = 0.000001;
// REAL Dn = 0.000001;
// REAL De = 0.000001;
// REAL Dne = 0.000001;
// REAL Dw = 0.000001;
// REAL Dnw = 0.000001;
// REAL Dt = 0.000001;
// REAL Dtn = 0.000001;
// REAL Db = 0.000001;
// REAL Dbn = 0.000001;
REAL vip = NU; // visc. plus one index
REAL vic = NU; // visc. at c
REAL vxe = NU; // visc. x-dir east
REAL vxw = NU; // visc. x-dir west
REAL vzt = NU; // visc. z-dir top
REAL vzb = NU; // visc. z-dir bottom
// Zero out storage
d_tauw23bst[ I - NXNY ] = 0.0;
d_tauw23tst[ I - NXNY ] = 0.0;
#ifdef TEMP_TURB_INLET_PARTIAL_COUPLING
int tinflow_check_xdir = (xpos > DCONSTANT_PERTURB_GCMIN_X && xpos < DCONSTANT_PERTURB_GCMAX_X
&& ypos > DCONSTANT_PERTURB_GCMIN_Y);
int tinflow_check_ydir = (ypos > DCONSTANT_PERTURB_GCMIN_Y && ypos < DCONSTANT_PERTURB_GCMAX_Y
&& xpos > DCONSTANT_PERTURB_GCMIN_X);
int tinflow_check = (d_Tinflow != 0) && ((tinflow_check_xdir || tinflow_check_ydir));
#else
int tinflow_check = (d_Tinflow != 0);
#endif // TEMP_TURB_INLET_PARTIAL_COUPLING
int eddyvisc_check
= (xpos < DCONSTANT_PERTURB_EDDYVIS_MAX_X || ypos < DCONSTANT_PERTURB_EDDYVIS_MAX_Y);
#ifdef TEMPERATURE_SOLUTION
REAL t_at_v = DCONSTANT_TEMP_ISOTH;
#endif // TEMPERATURE_SOLUTION
#ifdef TEMP_TURB_INLET
REAL ti_at_v = DCONSTANT_TEMP_ISOTH;
#endif // TEMP_TURB_INLET
// Sweep the z-layers selected by `sections`; when SECTION_MID is not
// requested the loop handles k = kbeg and then jumps straight to
// k = nlayers - 1 (see the bottom of the loop body).
while (k < kend) {
unsigned int base = I + k * NXNY;
unsigned int north = base + NX;
REAL z = (REAL)(k + DCONSTANT_ZFIRST) * DCONSTANT_DZ;
// Shear stresses
REAL tauw23b = d_tauw23b[ I - NXNY ]; // bottom wall
REAL tauw23t = d_tauw23t[ I - NXNY ]; // top wall
REAL tauw21w = d_tauw21w[ k * NY + ypos ]; // west wall
REAL tauw21e = d_tauw21e[ k * NY + ypos ]; // east wall
REAL diff, adv;
REAL boussinesq_approx = 0.0;
REAL uc = d_u[ base ];
REAL uw = d_u[ base - 1 ];
REAL un = d_u[ north ];
REAL unw = d_u[ base + NX - 1 ];
REAL vc = d_v[ base ];
REAL ve = d_v[ base + 1 ];
REAL vw = d_v[ base - 1 ];
REAL vn = d_v[ north ];
REAL vs = d_v[ base - NX ];
REAL vt = d_v[ base + NXNY ];
REAL vb = d_v[ base - NXNY ];
REAL wc = d_w[ base ];
REAL wn = d_w[ base + NX ];
REAL wb = d_w[ base - NXNY ];
REAL wbn = d_w[ base - NXNY + NX ];
// Average the turbulent viscosities around the v velocity face
if (d_nu != 0) {
// if ( d_nu_rans != 0 )
//{
// Dc = d_df[base]; // The allocation logic should guarantee that d_df is allocated
// if hybrid RANS/LES is chosen Dn = d_df[north]; De = d_df[base+1]; Dne =
// d_df[base+NX+1]; Dw = d_df[base-1]; Dnw = d_df[base+NX-1]; Dt = d_df[base+NXNY];
// Dtn = d_df[base+NXNY+NX];
// Db = d_df[base-NXNY];
// Dbn = d_df[base-NXNY+NX];
// vip = NU + d_nu_rans[north] * ( Dn <= H ) + d_nu[north] * ( Dn > H );
// vic = NU + d_nu_rans[base] * ( Dc <= H ) + d_nu[base] * ( Dc > H );
// if ( ( Dc > H ) || ( Dn > H ) || ( Dne > H ) || ( De > H ) ) {
// vxe = NU + 0.25*(d_nu[base]+ d_nu[north] + d_nu[base+NX+1] + d_nu[base+1]);
// } else {
// vxe = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base+NX+1] +
// d_nu_rans[base+1]);
// }
// if ( ( Dc > H ) || ( Dn > H ) || ( Dnw > H ) || ( Dw > H ) ) {
// vxw = NU + 0.25*(d_nu[base] + d_nu[north] + d_nu[base+NX-1] + d_nu[base-1]);
// } else {
// vxw = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base+NX-1] +
// d_nu_rans[base-1]);
// }
// if ( ( Dc > H ) || ( Dn > H ) || ( Dtn > H ) || ( Dt > H ) ) {
// vzt = NU + 0.25*(d_nu[base] + d_nu[north] + d_nu[base+NXNY+NX] +
// d_nu[base+NXNY]);
// } else {
// vzt = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base+NXNY+NX] +
// d_nu_rans[base+NXNY]);
// }
// if ( ( Dc > H ) || ( Dn > H ) || ( Dbn > H ) || ( Db > H ) ) {
// vzb = NU + 0.25*(d_nu[base] + d_nu[north] + d_nu[base-NXNY+NX] +
// d_nu[base-NXNY]);
// } else {
// vzb = NU + 0.25*(d_nu_rans[base] + d_nu_rans[north] + d_nu_rans[base-NXNY+NX] +
// d_nu_rans[base-NXNY]);
// }
//} else
//{
// Eliminate eddy viscosity in perturbation zone
REAL nu_perturb = (1.0 - eddyvisc_check);
vic = nu_perturb * d_nu[ base ]; // use to reduce global memory accesses
vip = nu_perturb * d_nu[ north ]; // use to reduce global memory accesses
vxe = NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base + NX + 1 ] + d_nu[ base + 1 ]);
vxw = NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base + NX - 1 ] + d_nu[ base - 1 ]);
vzt
= NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base + NXNY + NX ] + d_nu[ base + NXNY ]);
vzb
= NU + nu_perturb * 0.25 * (vic + vip + d_nu[ base - NXNY + NX ] + d_nu[ base - NXNY ]);
vic += NU; // Add molecular viscosity
vip += NU; // Add molecular viscosity
//}
}
#ifdef TEMP_TURB_INLET
// Apply boussinesq approximation for turbulent inflow temperature bouyancy effects
if (tinflow_check) {
// average the temperature about the w velocity point
ti_at_v = 0.5 * (d_Tinflow[ base ] + d_Tinflow[ base + NX ]);
}
boussinesq_approx += boussinesq_bouyancy(DCONSTANT_GRAVITY_Y, ti_at_v);
#endif // TEMP_TURB_INLET
// Calculate shear terms
REAL tau21p = vxe * ((ve - vc) * dxi + (un - uc) * dyi);
REAL tau21m = vxw * ((vc - vw) * dxi + (unw - uw) * dyi);
REAL tau22p = 2.0 * vip * (vn - vc) * dyi;
REAL tau22m = 2.0 * vic * (vc - vs) * dyi;
REAL tau23p = vzt * ((vt - vc) * dzi + (wn - wc) * dyi);
REAL tau23m = vzb * ((vc - vb) * dzi + (wbn - wb) * dyi);
// Apply instantaneous shear BC
// u-component averaged to v-component
REAL uav = 0.25 * (uc + uw + un + unw);
// w-component averaged to v-component
REAL wav = 0.25 * (wc + wb + wn + wbn);
// top/bottom wall-parallel velocity magnitude
REAL magxy = sqrt(uav * uav + vc * vc);
// east/west wall-parallel velocity magnitude
REAL magyz = sqrt(wav * wav + vc * vc);
// Avoid division by zero if velocities are zero
magxy = (magxy < MACHEPS) + (magxy > MACHEPS) * magxy;
magyz = (magyz < MACHEPS) + (magyz > MACHEPS) * magyz;
REAL temp;
// west wall
temp
= (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_W) == BOUNDARY_INSTASHEAR) && (xpos == 1));
tau21m = (1.0 - temp) * tau21m + temp * (tauw21w * (vc / magyz));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw21w = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw21w, tauw21w * ( vc / magyz ));
// east wall
temp = (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_E) == BOUNDARY_INSTASHEAR)
&& (xpos == (DCONSTANT_NX - 2)));
tau21p = (1.0 - temp) * tau21p - temp * (tauw21e * (vc / magyz));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw21e = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw21e, tauw21e * ( vc / magyz ));
// bottom wall
temp = (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_B) == BOUNDARY_INSTASHEAR)
&& (DCONSTANT_DEVICE == 0) && (k == 1));
tau23m = (1.0 - temp) * tau23m + temp * (tauw23b * (vc / magxy));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw23b = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw23b, tauw23b * ( vc / magxy ));
// Only add tau23m when we are on the correct indices
d_tauw23bst[ I - NXNY ] += tau23m * temp;
// top wall
temp = (REAL)((GET_FACE_VALUE(DCONSTANT_FACES, FACE_T) == BOUNDARY_INSTASHEAR)
&& (DCONSTANT_DEVICE == (DCONSTANT_GPUCOUNT - 1)) && (k == (nlayers - 2)));
tau23p = (1.0 - temp) * tau23p - temp * (tauw23t * (vc / magxy));
// if ( temp > 0.0 ) printf("[ %d %d %d ] tauw23t = %.15f schu = %.15f\n",xpos, ypos, k,
// tauw23t,tauw23t * ( vc / magxy ) );
// Only add tau23p when we are on the correct indices
d_tauw23tst[ I - NXNY ] += tau23p * temp;
// Calculate viscous diffusion term
diff = dxi * (tau21p - tau21m) + dyi * (tau22p - tau22m) + dzi * (tau23p - tau23m);
// Calculate convection term
adv = 0.25
* (dxi * ((uc + un) * (vc + ve) - (uw + unw) * (vw + vc))
+ dyi * ((vc + vn) * (vc + vn) - (vs + vc) * (vs + vc))
+ dzi * ((wc + wn) * (vc + vt) - (wb + wbn) * (vb + vc)));
// apply upwind scheme if desired
if (UPWIND > 0.0001) {
adv += UPWIND * 0.25
* (dxi * ((fabs(uc + un) * (vc - ve)) - (fabs(uw + unw) * (vw - vc)))
+ dyi * ((fabs(vc + vn) * (vc - vn)) - (fabs(vs + vc) * (vs - vc)))
+ dzi * ((fabs(wc + wn) * (vc - vt)) - (fabs(wb + wbn) * (vb - vc))));
}
#ifdef TEMPERATURE_SOLUTION
// Apply boussinesq approximation for physical temperature bouyancy effects
if (d_phi != 0) {
// average the temperature about the w velocity point
t_at_v = 0.5 * (d_phi[ base ] + d_phi[ base + NX ]);
boussinesq_approx += boussinesq_bouyancy(DCONSTANT_GRAVITY_Y, t_at_v);
}
#endif // TEMPERATURE_SOLUTION
// REAL coriolis =-DCONSTANT_EKMAN_ANGULARVELOCITY*(uc-DCONSTANT_GEOSTROPHIC_WIND);
// printf("coriolis=%f,geostrophic=%f\n",DCONSTANT_EKMAN_ANGULARVELOCITY,
// DCONSTANT_GEOSTROPHIC_WIND);
// Rayleigh damping: inactive below z = 0.5*LZ, quintic ramp up to 0.5
// between 0.5*LZ and 0.9*LZ, constant 0.5 above.
REAL start = 0.50;
REAL end = 0.90;
REAL thickness = end - start;
REAL damping_factor = 0.5 * (z > start * DCONSTANT_LZ) * (z < end * DCONSTANT_LZ)
* pow((z - start * DCONSTANT_LZ) / (thickness * DCONSTANT_LZ), 5)
+ 0.5 * (z > end * DCONSTANT_LZ);
REAL rayleigh_damping = -damping_factor * vc;
#ifdef VELOCITY_NUDGING
// Advance the time step
PERFORM_TIME_STEP(time_method, d_vt2[ base ], d_vt1[ base ], d_vnew[ base ], vc,
(diff - adv + boussinesq_approx + rayleigh_damping));
#else // VELOCITY_NUDGING
// Advance the time step
PERFORM_TIME_STEP(time_method, d_vt2[ base ], d_vt1[ base ], vproj, vc,
(diff - adv + boussinesq_approx + rayleigh_damping));
// Apply forcing
if (d_forcing_y != 0) vproj += dt * d_forcing_y[ base ];
d_vnew[ base ] = vproj;
#endif // VELOCITY_NUDGING
if ((!do_mid) && (k == kbeg)) {
k = nlayers - 1;
} else {
k++;
}
}
}
|
e152a2aa89593ca899d4e9e3c391a8c36cdcfff9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
/**
* Device kernel that takes 2 arguments
* @param bad_thread represents the thread we want to trigger assertion on.
* @param bad_block represents the block we want to trigger assertion on.
* This kernel will only trigger a device side assertion for <<bad_block,
* bad_thread>> pair. all the other blocks and threads pairs will basically be
* no-op.
*/
/**
 * Kernel that fires a device-side assertion on exactly one (block, thread)
 * coordinate; every other thread in the grid is a no-op.
 * @param bad_thread x-index of the thread that must assert
 * @param bad_block  x-index of the block that must assert
 */
__global__ void cuda_device_assertions_fail_on_thread_block_kernel(
    const int bad_thread,
    const int bad_block,
    TORCH_DSA_KERNEL_ARGS) {
  const bool on_target_lane =
      (blockIdx.x == bad_block) && (threadIdx.x == bad_thread);
  if (on_target_lane) {
    // Unconditionally trips the device-side assertion machinery.
    CUDA_KERNEL_ASSERT2(false);
  }
}
/**
* TEST: Triggering device side assertion on only 1 thread from <<<1024,128>>>
* grid. kernel used is unique, it take 2 parameters to tell which particular
* block and thread it should assert, all the other theads of the kernel will be
* basically no-op.
*/
/**
 * Launches the assertion kernel on a <<<1024, 128>>> grid where only thread 29
 * of block 937 asserts, then verifies the c10::Error raised at the next
 * synchronization reports the failing thread, block, device, kernel name,
 * launch file/function, and stream.
 * NOTE(review): device_synchronize() is expected to throw c10::Error once the
 * device-side assertion fires; the runtime_error below only triggers when the
 * DSA machinery failed to report anything.
 */
void cuda_device_assertions_catches_thread_and_block_and_device() {
  const auto stream = c10::hip::getStreamFromPoolMasqueradingAsCUDA();
  TORCH_DSA_KERNEL_LAUNCH(
      cuda_device_assertions_fail_on_thread_block_kernel,
      1024, /* Blocks */
      128, /* Threads */
      0, /* Shared mem */
      stream, /* Stream */
      29, /* bad thread */
      937 /* bad block */
  );
  try {
    // Surfaces the asynchronous device-side assertion failure.
    c10::hip::device_synchronize();
    throw std::runtime_error("Test didn't fail, but should have.");
  } catch (const c10::Error& err) {
    const auto err_str = std::string(err.what());
    ASSERT_THAT(
        err_str, HasSubstr("Thread ID that failed assertion = [29,0,0]"));
    ASSERT_THAT(
        err_str, HasSubstr("Block ID that failed assertion = [937,0,0]"));
    ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Name of kernel launched that led to failure = cuda_device_assertions_fail_on_thread_block_kernel"));
    ASSERT_THAT(
        err_str, HasSubstr("File containing kernel launch = " __FILE__));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Function containing kernel launch = " +
            std::string(__FUNCTION__)));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Stream kernel was launched on = " + std::to_string(stream.id())));
  }
}
// Run the thread/block/device attribution test only when the build enables
// device-side assertions (DSA); otherwise skip with an explanatory message.
TEST(CUDATest, cuda_device_assertions_catches_thread_and_block_and_device) {
#ifdef TORCH_USE_CUDA_DSA
  // Enable kernel-launch bookkeeping so failures can be attributed.
  c10::hip::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = true;
  cuda_device_assertions_catches_thread_and_block_and_device();
#else
  GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
| e152a2aa89593ca899d4e9e3c391a8c36cdcfff9.cu | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
/**
* Device kernel that takes 2 arguments
* @param bad_thread represents the thread we want to trigger assertion on.
* @param bad_block represents the block we want to trigger assertion on.
* This kernel will only trigger a device side assertion for <<bad_block,
* bad_thread>> pair. all the other blocks and threads pairs will basically be
* no-op.
*/
/**
 * Kernel that fires a device-side assertion on exactly one (block, thread)
 * coordinate; every other thread in the grid is a no-op.
 * @param bad_thread x-index of the thread that must assert
 * @param bad_block  x-index of the block that must assert
 */
__global__ void cuda_device_assertions_fail_on_thread_block_kernel(
    const int bad_thread,
    const int bad_block,
    TORCH_DSA_KERNEL_ARGS) {
  const bool on_target_lane =
      (blockIdx.x == bad_block) && (threadIdx.x == bad_thread);
  if (on_target_lane) {
    // Unconditionally trips the device-side assertion machinery.
    CUDA_KERNEL_ASSERT2(false);
  }
}
/**
* TEST: Triggering device side assertion on only 1 thread from <<<1024,128>>>
* grid. kernel used is unique, it take 2 parameters to tell which particular
* block and thread it should assert, all the other theads of the kernel will be
* basically no-op.
*/
/**
 * Launches the assertion kernel on a <<<1024, 128>>> grid where only thread 29
 * of block 937 asserts, then verifies the c10::Error raised at the next
 * synchronization reports the failing thread, block, device, kernel name,
 * launch file/function, and stream.
 * NOTE(review): device_synchronize() is expected to throw c10::Error once the
 * device-side assertion fires; the runtime_error below only triggers when the
 * DSA machinery failed to report anything.
 */
void cuda_device_assertions_catches_thread_and_block_and_device() {
  const auto stream = c10::cuda::getStreamFromPool();
  TORCH_DSA_KERNEL_LAUNCH(
      cuda_device_assertions_fail_on_thread_block_kernel,
      1024, /* Blocks */
      128, /* Threads */
      0, /* Shared mem */
      stream, /* Stream */
      29, /* bad thread */
      937 /* bad block */
  );
  try {
    // Surfaces the asynchronous device-side assertion failure.
    c10::cuda::device_synchronize();
    throw std::runtime_error("Test didn't fail, but should have.");
  } catch (const c10::Error& err) {
    const auto err_str = std::string(err.what());
    ASSERT_THAT(
        err_str, HasSubstr("Thread ID that failed assertion = [29,0,0]"));
    ASSERT_THAT(
        err_str, HasSubstr("Block ID that failed assertion = [937,0,0]"));
    ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Name of kernel launched that led to failure = cuda_device_assertions_fail_on_thread_block_kernel"));
    ASSERT_THAT(
        err_str, HasSubstr("File containing kernel launch = " __FILE__));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Function containing kernel launch = " +
            std::string(__FUNCTION__)));
    ASSERT_THAT(
        err_str,
        HasSubstr(
            "Stream kernel was launched on = " + std::to_string(stream.id())));
  }
}
// Run the thread/block/device attribution test only when the build enables
// device-side assertions (DSA); otherwise skip with an explanatory message.
TEST(CUDATest, cuda_device_assertions_catches_thread_and_block_and_device) {
#ifdef TORCH_USE_CUDA_DSA
  // Enable kernel-launch bookkeeping so failures can be attributed.
  c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled = true;
  cuda_device_assertions_catches_thread_and_block_and_device();
#else
  GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
|
9f68974314ba69e7b4b8b17a977b6866aa526da1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
/**
 * Scale every stored value of a sparse tensor in place: X_val[i] = a * X_val[i].
 * Written as a grid-stride loop so any grid size covers all X_nnz entries.
 * @param X_val  device array of nonzero values (no aliasing, per __restrict__)
 * @param X_nnz  number of stored values
 * @param a      scalar multiplier
 */
static __global__ void pti_sMulKernel(
    ptiValue * __restrict__ X_val,
    ptiNnzIndex X_nnz,
    ptiValue a)
{
    const ptiNnzIndex stride = gridDim.x * blockDim.x;
    for(ptiNnzIndex idx = blockIdx.x * blockDim.x + threadIdx.x;
        idx < X_nnz;
        idx += stride) {
        X_val[idx] = a * X_val[idx];
    }
}
/**
* Multiply a sparse tensors with a scalar.
* @param[in] a the input scalar
* @param[in/out] X the input/output tensor
*/
/**
 * Multiply a sparse tensor by a scalar on the GPU: X->values[i] *= a.
 * Copies the value array to the device, launches pti_sMulKernel, and copies
 * the scaled values back, timing only the kernel region.
 * @param[in,out] X  sparse tensor whose values are scaled in place
 * @param[in]     a  scalar multiplier (asserted nonzero)
 * @return 0 on success; CUDA failures abort via pti_CheckCudaError
 * NOTE(review): the two hipMemcpy return values are not checked, and the
 * "%lu" printf specifiers assume ptiNnzIndex is unsigned long — confirm.
 */
int ptiCudaSparseTensorMulScalar(ptiSparseTensor *X, ptiValue a)
{
    ptiAssert(a != 0.0);
    int result;
    ptiTimer timer;
    ptiNewTimer(&timer, 0);
    double comp_time;
    /* Device memory allocation */
    ptiValue *X_val = NULL;
    result = hipMalloc((void **) &X_val, X->nnz * sizeof (ptiValue));
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar");
    /* Device memory copy */
    hipMemcpy(X_val, X->values.data, X->nnz * sizeof (ptiValue), hipMemcpyHostToDevice);
    ptiStartTimer(timer);
    /* Launch config: one block for tiny inputs, otherwise 256-thread blocks
       capped at 32768 blocks (the kernel loops to cover the remainder). */
    const ptiNnzIndex max_nblocks = 32768;
    const ptiNnzIndex max_nthreads_per_block = 256;
    ptiNnzIndex nthreadsx = 1;
    ptiNnzIndex all_nblocks = 0;
    ptiNnzIndex nblocks = 0;
    if(X->nnz < max_nthreads_per_block) {
        nthreadsx = X->nnz;
        nblocks = 1;
    } else {
        nthreadsx = max_nthreads_per_block;
        all_nblocks = (X->nnz + nthreadsx -1) / nthreadsx;
        if(all_nblocks < max_nblocks) {
            nblocks = all_nblocks;
        } else {
            nblocks = max_nblocks;
        }
    }
    dim3 dimBlock(nthreadsx);
    printf("all_nblocks: %lu, nthreadsx: %lu\n", all_nblocks, nthreadsx);
    printf("[Cuda ptins MulScalar] pti_sMulKernel<<<%lu, (%lu)>>>\n", nblocks, nthreadsx);
    hipLaunchKernelGGL(( pti_sMulKernel), dim3(nblocks), dim3(dimBlock), 0, 0, X_val, X->nnz, a);
    result = hipDeviceSynchronize();
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar kernel");
    ptiStopTimer(timer);
    comp_time = ptiPrintElapsedTime(timer, "Cuda ptins MulScalar");
    /* Copy the scaled values back into host storage. */
    hipMemcpy(X->values.data, X_val, X->nnz * sizeof (ptiValue), hipMemcpyDeviceToHost);
    ptiFreeTimer(timer);
    result = hipFree(X_val);
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar");
    printf("[GPU CooMulScalar]: %lf\n", comp_time);
    printf("\n");
    return 0;
}
| 9f68974314ba69e7b4b8b17a977b6866aa526da1.cu | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
/**
 * Scale every stored value of a sparse tensor in place: X_val[i] = a * X_val[i].
 * Written as a grid-stride loop so any grid size covers all X_nnz entries.
 * @param X_val  device array of nonzero values (no aliasing, per __restrict__)
 * @param X_nnz  number of stored values
 * @param a      scalar multiplier
 */
static __global__ void pti_sMulKernel(
    ptiValue * __restrict__ X_val,
    ptiNnzIndex X_nnz,
    ptiValue a)
{
    const ptiNnzIndex stride = gridDim.x * blockDim.x;
    for(ptiNnzIndex idx = blockIdx.x * blockDim.x + threadIdx.x;
        idx < X_nnz;
        idx += stride) {
        X_val[idx] = a * X_val[idx];
    }
}
/**
* Multiply a sparse tensors with a scalar.
* @param[in] a the input scalar
* @param[in/out] X the input/output tensor
*/
/**
 * Multiply a sparse tensor by a scalar on the GPU: X->values[i] *= a.
 * Copies the value array to the device, launches pti_sMulKernel, and copies
 * the scaled values back, timing only the kernel region.
 * @param[in,out] X  sparse tensor whose values are scaled in place
 * @param[in]     a  scalar multiplier (asserted nonzero)
 * @return 0 on success; CUDA failures abort via pti_CheckCudaError
 */
int ptiCudaSparseTensorMulScalar(ptiSparseTensor *X, ptiValue a)
{
    ptiAssert(a != 0.0);
    int result;
    ptiTimer timer;
    ptiNewTimer(&timer, 0);
    double comp_time;
    /* Device memory allocation */
    ptiValue *X_val = NULL;
    result = cudaMalloc((void **) &X_val, X->nnz * sizeof (ptiValue));
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar");
    /* Host-to-device copy — checked (the original ignored the status). */
    result = cudaMemcpy(X_val, X->values.data, X->nnz * sizeof (ptiValue), cudaMemcpyHostToDevice);
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar");
    ptiStartTimer(timer);
    /* Launch config: one block for tiny inputs, otherwise 256-thread blocks
       capped at 32768 blocks (the kernel loops to cover the remainder). */
    const ptiNnzIndex max_nblocks = 32768;
    const ptiNnzIndex max_nthreads_per_block = 256;
    ptiNnzIndex nthreadsx = 1;
    ptiNnzIndex all_nblocks = 0;
    ptiNnzIndex nblocks = 0;
    if(X->nnz < max_nthreads_per_block) {
        nthreadsx = X->nnz;
        nblocks = 1;
    } else {
        nthreadsx = max_nthreads_per_block;
        all_nblocks = (X->nnz + nthreadsx -1) / nthreadsx;
        if(all_nblocks < max_nblocks) {
            nblocks = all_nblocks;
        } else {
            nblocks = max_nblocks;
        }
    }
    dim3 dimBlock(nthreadsx);
    printf("all_nblocks: %lu, nthreadsx: %lu\n", all_nblocks, nthreadsx);
    printf("[Cuda ptins MulScalar] pti_sMulKernel<<<%lu, (%lu)>>>\n", nblocks, nthreadsx);
    pti_sMulKernel<<<nblocks, dimBlock>>>(X_val, X->nnz, a);
    /* BUG FIX: cudaThreadSynchronize() is deprecated (and removed from
       modern CUDA); cudaDeviceSynchronize() is the supported equivalent. */
    result = cudaDeviceSynchronize();
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar kernel");
    ptiStopTimer(timer);
    comp_time = ptiPrintElapsedTime(timer, "Cuda ptins MulScalar");
    /* Device-to-host copy of the scaled values — checked. */
    result = cudaMemcpy(X->values.data, X_val, X->nnz * sizeof (ptiValue), cudaMemcpyDeviceToHost);
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar");
    ptiFreeTimer(timer);
    result = cudaFree(X_val);
    pti_CheckCudaError(result != 0, "Cuda ptins MulScalar");
    printf("[GPU CooMulScalar]: %lf\n", comp_time);
    printf("\n");
    return 0;
}
|
ref-saxpy.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "shared.cuh"
// Per-particle working set gathered out of a ParticleView: position,
// direction of travel, and the distance to the next event.
// NOTE(review): Point's components appear to be double* (they bind directly
// to saxpy's double* parameters below) — confirm against shared.cuh.
struct ParticleRef {
  Point pos;
  Point dir;
  double nextdist;
};
// Gather the i-th particle's fields from the view into a ParticleRef.
inline __device__ ParticleRef make_ref(const ParticleView &view, int i) {
  return {view.get_pos(i), view.get_dir(i), view.get_nextdist(i)};
}
// In-place three-component update: (*x, *y, *z) += (*u, *v, *w) * distance.
// __restrict__ promises the six pointers do not alias one another.
// NOTE(review): the name is misleading — this advances a single 3-D point;
// it is not a BLAS-style saxpy over arrays.
__device__ inline void saxpy(double *__restrict__ x, double *__restrict__ y,
                             double *__restrict__ z,
                             const double *__restrict__ u,
                             const double *__restrict__ v,
                             const double *__restrict__ w, double distance) {
  *x += *u * distance;
  *y += *v * distance;
  *z += *w * distance;
}
// Advance one particle's position along its direction by nextdist.
// ref is passed by value, but its Point members bind to saxpy's double*
// parameters, so the write lands in the storage they point at.
__device__ inline void move_impl(const ParticleRef ref) {
  saxpy(ref.pos.x, ref.pos.y, ref.pos.z, ref.dir.x, ref.dir.y, ref.dir.z,
        ref.nextdist);
}
/**
 * Kernel: advance every particle in the view by its next-event distance.
 * One thread per particle; threads whose id reaches view.size do nothing.
 */
__global__ void move(ParticleView view) {
  const int tid = thread_id();
  if (tid < view.size) {
    move_impl(make_ref(view, tid));
  }
}
| ref-saxpy.cu | #include "shared.cuh"
// Per-particle working set gathered out of a ParticleView: position,
// direction of travel, and the distance to the next event.
// NOTE(review): Point's components appear to be double* (they bind directly
// to saxpy's double* parameters below) — confirm against shared.cuh.
struct ParticleRef {
  Point pos;
  Point dir;
  double nextdist;
};
// Gather the i-th particle's fields from the view into a ParticleRef.
inline __device__ ParticleRef make_ref(const ParticleView &view, int i) {
  return {view.get_pos(i), view.get_dir(i), view.get_nextdist(i)};
}
// In-place three-component update: (*x, *y, *z) += (*u, *v, *w) * distance.
// __restrict__ promises the six pointers do not alias one another.
// NOTE(review): the name is misleading — this advances a single 3-D point;
// it is not a BLAS-style saxpy over arrays.
__device__ inline void saxpy(double *__restrict__ x, double *__restrict__ y,
                             double *__restrict__ z,
                             const double *__restrict__ u,
                             const double *__restrict__ v,
                             const double *__restrict__ w, double distance) {
  *x += *u * distance;
  *y += *v * distance;
  *z += *w * distance;
}
// Advance one particle's position along its direction by nextdist.
// ref is passed by value, but its Point members bind to saxpy's double*
// parameters, so the write lands in the storage they point at.
__device__ inline void move_impl(const ParticleRef ref) {
  saxpy(ref.pos.x, ref.pos.y, ref.pos.z, ref.dir.x, ref.dir.y, ref.dir.z,
        ref.nextdist);
}
/**
 * Kernel: advance every particle in the view by its next-event distance.
 * One thread per particle; threads whose id reaches view.size do nothing.
 */
__global__ void move(ParticleView view) {
  const int tid = thread_id();
  if (tid < view.size) {
    move_impl(make_ref(view, tid));
  }
}
|
abc0ab878ac2ea3a3b0ba0351bd7e75c0ff6a85b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "make_pillar_index_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Sweep benchmark for make_pillar_index_kernel (hipified).
 * argv[1] = number of matrix sizes (rows of matrices_) to sweep; for each
 * size, every block shape in blocks_ is timed over 1000 launches after one
 * context-warming launch plus a 10-iteration warm-up.
 * Prints: [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
 */
int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
          BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: the original allocated XSIZE*YSIZE *bytes* per buffer;
      // each buffer holds XSIZE*YSIZE elements, so scale by sizeof().
      int *dev_pillar_count_histo = NULL;
      hipMalloc(&dev_pillar_count_histo, XSIZE * YSIZE * sizeof(int));
      int *dev_counter = NULL;
      hipMalloc(&dev_counter, XSIZE * YSIZE * sizeof(int));
      int *dev_pillar_count = NULL;
      hipMalloc(&dev_pillar_count, XSIZE * YSIZE * sizeof(int));
      int *dev_x_coors = NULL;
      hipMalloc(&dev_x_coors, XSIZE * YSIZE * sizeof(int));
      int *dev_y_coors = NULL;
      hipMalloc(&dev_y_coors, XSIZE * YSIZE * sizeof(int));
      float *dev_x_coors_for_sub = NULL;
      hipMalloc(&dev_x_coors_for_sub, XSIZE * YSIZE * sizeof(float));
      float *dev_y_coors_for_sub = NULL;
      hipMalloc(&dev_y_coors_for_sub, XSIZE * YSIZE * sizeof(float));
      float *dev_num_points_per_pillar = NULL;
      hipMalloc(&dev_num_points_per_pillar, XSIZE * YSIZE * sizeof(float));
      int *dev_sparse_pillar_map = NULL;
      hipMalloc(&dev_sparse_pillar_map, XSIZE * YSIZE * sizeof(int));
      const int max_pillars = 1;
      const int max_points_per_pillar = 1;
      const int GRID_X_SIZE = 1;
      const float PILLAR_X_SIZE = 1;
      const float PILLAR_Y_SIZE = 1;
      const int NUM_INDS_FOR_SCAN = 1;
      // Round the launch domain up to a multiple of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) iXSIZE++;
      while (iYSIZE % BLOCKY != 0) iYSIZE++;
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);  // force lazy context creation before anything is timed
      // One launch + sync, then a 10-iteration warm-up.
      hipLaunchKernelGGL((make_pillar_index_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0,
          dev_pillar_count_histo, dev_counter, dev_pillar_count, dev_x_coors, dev_y_coors,
          dev_x_coors_for_sub, dev_y_coors_for_sub, dev_num_points_per_pillar,
          dev_sparse_pillar_map, max_pillars, max_points_per_pillar, GRID_X_SIZE,
          PILLAR_X_SIZE, PILLAR_Y_SIZE, NUM_INDS_FOR_SCAN);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((make_pillar_index_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0,
            dev_pillar_count_histo, dev_counter, dev_pillar_count, dev_x_coors, dev_y_coors,
            dev_x_coors_for_sub, dev_y_coors_for_sub, dev_num_points_per_pillar,
            dev_sparse_pillar_map, max_pillars, max_points_per_pillar, GRID_X_SIZE,
            PILLAR_X_SIZE, PILLAR_Y_SIZE, NUM_INDS_FOR_SCAN);
      }
      // BUG FIX: warm-up launches are asynchronous; drain them so the timed
      // region below does not absorb their execution time.
      hipDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((make_pillar_index_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0,
            dev_pillar_count_histo, dev_counter, dev_pillar_count, dev_x_coors, dev_y_coors,
            dev_x_coors_for_sub, dev_y_coors_for_sub, dev_num_points_per_pillar,
            dev_sparse_pillar_map, max_pillars, max_points_per_pillar, GRID_X_SIZE,
            PILLAR_X_SIZE, PILLAR_Y_SIZE, NUM_INDS_FOR_SCAN);
      }
      // BUG FIX: without this sync the host timer measured only kernel
      // *launch* overhead, not kernel execution time.
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: release the nine device buffers so the sweep does not leak.
      hipFree(dev_pillar_count_histo);
      hipFree(dev_counter);
      hipFree(dev_pillar_count);
      hipFree(dev_x_coors);
      hipFree(dev_y_coors);
      hipFree(dev_x_coors_for_sub);
      hipFree(dev_y_coors_for_sub);
      hipFree(dev_num_points_per_pillar);
      hipFree(dev_sparse_pillar_map);
    }
}} | abc0ab878ac2ea3a3b0ba0351bd7e75c0ff6a85b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "make_pillar_index_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Sweep benchmark for make_pillar_index_kernel.
 * argv[1] = number of matrix sizes (rows of matrices_) to sweep; for each
 * size, every block shape in blocks_ is timed over 1000 launches after one
 * context-warming launch plus a 10-iteration warm-up.
 * Prints: [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
 */
int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
          BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: the original allocated XSIZE*YSIZE *bytes* per buffer;
      // each buffer holds XSIZE*YSIZE elements, so scale by sizeof().
      int *dev_pillar_count_histo = NULL;
      cudaMalloc(&dev_pillar_count_histo, XSIZE * YSIZE * sizeof(int));
      int *dev_counter = NULL;
      cudaMalloc(&dev_counter, XSIZE * YSIZE * sizeof(int));
      int *dev_pillar_count = NULL;
      cudaMalloc(&dev_pillar_count, XSIZE * YSIZE * sizeof(int));
      int *dev_x_coors = NULL;
      cudaMalloc(&dev_x_coors, XSIZE * YSIZE * sizeof(int));
      int *dev_y_coors = NULL;
      cudaMalloc(&dev_y_coors, XSIZE * YSIZE * sizeof(int));
      float *dev_x_coors_for_sub = NULL;
      cudaMalloc(&dev_x_coors_for_sub, XSIZE * YSIZE * sizeof(float));
      float *dev_y_coors_for_sub = NULL;
      cudaMalloc(&dev_y_coors_for_sub, XSIZE * YSIZE * sizeof(float));
      float *dev_num_points_per_pillar = NULL;
      cudaMalloc(&dev_num_points_per_pillar, XSIZE * YSIZE * sizeof(float));
      int *dev_sparse_pillar_map = NULL;
      cudaMalloc(&dev_sparse_pillar_map, XSIZE * YSIZE * sizeof(int));
      const int max_pillars = 1;
      const int max_points_per_pillar = 1;
      const int GRID_X_SIZE = 1;
      const float PILLAR_X_SIZE = 1;
      const float PILLAR_Y_SIZE = 1;
      const int NUM_INDS_FOR_SCAN = 1;
      // Round the launch domain up to a multiple of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) iXSIZE++;
      while (iYSIZE % BLOCKY != 0) iYSIZE++;
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force lazy context creation before anything is timed
      // One launch + sync, then a 10-iteration warm-up.
      make_pillar_index_kernel<<<gridBlock, threadBlock>>>(
          dev_pillar_count_histo, dev_counter, dev_pillar_count, dev_x_coors, dev_y_coors,
          dev_x_coors_for_sub, dev_y_coors_for_sub, dev_num_points_per_pillar,
          dev_sparse_pillar_map, max_pillars, max_points_per_pillar, GRID_X_SIZE,
          PILLAR_X_SIZE, PILLAR_Y_SIZE, NUM_INDS_FOR_SCAN);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        make_pillar_index_kernel<<<gridBlock, threadBlock>>>(
            dev_pillar_count_histo, dev_counter, dev_pillar_count, dev_x_coors, dev_y_coors,
            dev_x_coors_for_sub, dev_y_coors_for_sub, dev_num_points_per_pillar,
            dev_sparse_pillar_map, max_pillars, max_points_per_pillar, GRID_X_SIZE,
            PILLAR_X_SIZE, PILLAR_Y_SIZE, NUM_INDS_FOR_SCAN);
      }
      // BUG FIX: warm-up launches are asynchronous; drain them so the timed
      // region below does not absorb their execution time.
      cudaDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        make_pillar_index_kernel<<<gridBlock, threadBlock>>>(
            dev_pillar_count_histo, dev_counter, dev_pillar_count, dev_x_coors, dev_y_coors,
            dev_x_coors_for_sub, dev_y_coors_for_sub, dev_num_points_per_pillar,
            dev_sparse_pillar_map, max_pillars, max_points_per_pillar, GRID_X_SIZE,
            PILLAR_X_SIZE, PILLAR_Y_SIZE, NUM_INDS_FOR_SCAN);
      }
      // BUG FIX: without this sync the host timer measured only kernel
      // *launch* overhead, not kernel execution time.
      cudaDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: release the nine device buffers so the sweep does not leak.
      cudaFree(dev_pillar_count_histo);
      cudaFree(dev_counter);
      cudaFree(dev_pillar_count);
      cudaFree(dev_x_coors);
      cudaFree(dev_y_coors);
      cudaFree(dev_x_coors_for_sub);
      cudaFree(dev_y_coors_for_sub);
      cudaFree(dev_num_points_per_pillar);
      cudaFree(dev_sparse_pillar_map);
    }
}} |
57cc20377db6b01aa92a03735ed50fabf0c06527.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "kernel1.h"
//extern __shared__ float s_data[];
////////////////////////////////////////////////////////////////////////////////
//! Weighted Jacobi Iteration
//! @param g_dataA input data in global memory
//! @param g_dataB output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Shared-memory tile variant of the weighted Jacobi sweep: each block stages
// a 3-row window of g_dataA in s_data, then writes a 9-point weighted average
// into g_dataB. Requires dynamic shared memory of at least
// 3 * (blockDim.x + 2) floats.
//
// NOTE(review): several index computations look suspect; verify against the
// naive kernel before trusting the output:
//  * mid_row adds a column term (blockIdx.x * blockDim.x) into what is then
//    used as a row index multiplied by floatpitch;
//  * the output is addressed with `width` while the input uses `floatpitch`
//    — inconsistent whenever the pitch is padded;
//  * the stencil taps (e.g. s_data[s_index0 - 1] labelled "N") do not match
//    their compass-direction comments;
//  * out-of-range threads return before reaching __syncthreads(), which is
//    undefined behavior when other threads of the same block hit the barrier.
__global__ void k1( float* g_dataA, float* g_dataB, int floatpitch, int width)
{
    extern __shared__ float s_data[];
    // TODO, implement this kernel below
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    y = y + 1; //because the edge of the data is not processed
    // global thread(data) column index
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    x = x + 1; //because the edge of the data is not processed
    if( y >=width - 1|| x >= width - 1 || y < 1 || x < 1 )// this is to check to make sure that the thread is within the array.
        return;
    int startRow = blockIdx.y;
    int startCol = blockDim.x * blockIdx.x;
    int s_rowwidth = blockDim.x +2; // because the blocks have to overlap on the right side that is why you add 2
    int s_index0 = threadIdx.x +1; //row zero in s_data. you add one because you don't deal with the outer edge
    int s_index1 = threadIdx.x + s_rowwidth + 1; //row one in s_data.so this goes to the other side
    int s_index2 = threadIdx.x + 2 * s_rowwidth +1; //this is to get the last
    //int s_index_result = threadIdx.x + 3 * s_rowwidth + 1;
    int mid_row = blockIdx.x * blockDim.x + 1 + floatpitch * blockIdx.y;
    int g_index0 = (mid_row -1) * floatpitch + startCol + 1+ threadIdx.x;
    int g_index1 = (mid_row) * floatpitch + startCol + 1 + threadIdx.x;
    int g_index2 = (mid_row +1) * floatpitch +startCol + 1 + threadIdx.x;
    if(startCol + startRow + 1 < width -1)
    {
        //copy the data from gobal mem to shared mem
        s_data[s_index0] = g_dataA[g_index0];
        s_data[s_index1] = g_dataA[g_index1];
        s_data[s_index2] = g_dataA[g_index2];
    }//end of if statement to populate the middle row of the current block
    if(startRow == 0)
    {
        //copy the extra two columns in the globabl mem
        s_data[s_index0 -1] = g_dataA[g_index0 - 1];
        s_data[s_index1 -1] = g_dataA[g_index1 -1];
        s_data[s_index2 -1] = g_dataA[g_index2 -1];
    }//end of if statement to populate the edge row
    if(threadIdx.x == width -3 - startCol || threadIdx.x == blockDim.x-1)
    {
        s_data[s_index0 + 1] = g_dataA[g_index0 +1];
        s_data[s_index1 + 1] = g_dataA[g_index1 +1];
        s_data[s_index2 +1] = g_dataA[g_index2 + 1];
    }//end of if statement to populate the row below the middle row
    __syncthreads();
    //if( x >= width - 1|| y >= width - 1 || x < 1 || y < 1 )// this is to check to make sure that the thread is within the array.
    // return;
    //this is copied from the other kernel
    g_dataB[y * width + x] = (
        0.2f * s_data[s_index1] + //itself s_ind_1
        0.1f * s_data[s_index0 -1] + //N s_ind_0
        0.1f * s_data[s_index0 +1] + //NE s_ind_0
        0.1f * s_data[s_index0 ] + //E s_ind1
        0.1f * s_data[s_index1 +1] + //SE s_ind2
        0.1f * s_data[s_index1 -1] + //S s_ind2
        0.1f * s_data[s_index2 ] + //SW
        0.1f * s_data[s_index2 -1] + //W
        0.1f * s_data[s_index2 +1] //NW
    ) * 0.95f;//*/
}
| 57cc20377db6b01aa92a03735ed50fabf0c06527.cu | #include <stdio.h>
#include "kernel1.h"
//extern __shared__ float s_data[];
////////////////////////////////////////////////////////////////////////////////
//! Weighted Jacobi Iteration
//! @param g_dataA input data in global memory
//! @param g_dataB output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Shared-memory tile variant of the weighted Jacobi sweep: each block stages
// a 3-row window of g_dataA in s_data, then writes a 9-point weighted average
// into g_dataB. Requires dynamic shared memory of at least
// 3 * (blockDim.x + 2) floats.
//
// NOTE(review): several index computations look suspect; verify against the
// naive kernel before trusting the output:
//  * mid_row adds a column term (blockIdx.x * blockDim.x) into what is then
//    used as a row index multiplied by floatpitch;
//  * the output is addressed with `width` while the input uses `floatpitch`
//    — inconsistent whenever the pitch is padded;
//  * the stencil taps (e.g. s_data[s_index0 - 1] labelled "N") do not match
//    their compass-direction comments;
//  * out-of-range threads return before reaching __syncthreads(), which is
//    undefined behavior when other threads of the same block hit the barrier.
__global__ void k1( float* g_dataA, float* g_dataB, int floatpitch, int width)
{
    extern __shared__ float s_data[];
    // TODO, implement this kernel below
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    y = y + 1; //because the edge of the data is not processed
    // global thread(data) column index
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    x = x + 1; //because the edge of the data is not processed
    if( y >=width - 1|| x >= width - 1 || y < 1 || x < 1 )// this is to check to make sure that the thread is within the array.
        return;
    int startRow = blockIdx.y;
    int startCol = blockDim.x * blockIdx.x;
    int s_rowwidth = blockDim.x +2; // because the blocks have to overlap on the right side that is why you add 2
    int s_index0 = threadIdx.x +1; //row zero in s_data. you add one because you don't deal with the outer edge
    int s_index1 = threadIdx.x + s_rowwidth + 1; //row one in s_data.so this goes to the other side
    int s_index2 = threadIdx.x + 2 * s_rowwidth +1; //this is to get the last
    //int s_index_result = threadIdx.x + 3 * s_rowwidth + 1;
    int mid_row = blockIdx.x * blockDim.x + 1 + floatpitch * blockIdx.y;
    int g_index0 = (mid_row -1) * floatpitch + startCol + 1+ threadIdx.x;
    int g_index1 = (mid_row) * floatpitch + startCol + 1 + threadIdx.x;
    int g_index2 = (mid_row +1) * floatpitch +startCol + 1 + threadIdx.x;
    if(startCol + startRow + 1 < width -1)
    {
        //copy the data from gobal mem to shared mem
        s_data[s_index0] = g_dataA[g_index0];
        s_data[s_index1] = g_dataA[g_index1];
        s_data[s_index2] = g_dataA[g_index2];
    }//end of if statement to populate the middle row of the current block
    if(startRow == 0)
    {
        //copy the extra two columns in the globabl mem
        s_data[s_index0 -1] = g_dataA[g_index0 - 1];
        s_data[s_index1 -1] = g_dataA[g_index1 -1];
        s_data[s_index2 -1] = g_dataA[g_index2 -1];
    }//end of if statement to populate the edge row
    if(threadIdx.x == width -3 - startCol || threadIdx.x == blockDim.x-1)
    {
        s_data[s_index0 + 1] = g_dataA[g_index0 +1];
        s_data[s_index1 + 1] = g_dataA[g_index1 +1];
        s_data[s_index2 +1] = g_dataA[g_index2 + 1];
    }//end of if statement to populate the row below the middle row
    __syncthreads();
    //if( x >= width - 1|| y >= width - 1 || x < 1 || y < 1 )// this is to check to make sure that the thread is within the array.
    // return;
    //this is copied from the other kernel
    g_dataB[y * width + x] = (
        0.2f * s_data[s_index1] + //itself s_ind_1
        0.1f * s_data[s_index0 -1] + //N s_ind_0
        0.1f * s_data[s_index0 +1] + //NE s_ind_0
        0.1f * s_data[s_index0 ] + //E s_ind1
        0.1f * s_data[s_index1 +1] + //SE s_ind2
        0.1f * s_data[s_index1 -1] + //S s_ind2
        0.1f * s_data[s_index2 ] + //SW
        0.1f * s_data[s_index2 -1] + //W
        0.1f * s_data[s_index2 +1] //NW
    ) * 0.95f;//*/
}
|
9d6ae35232c056f698ed3e4c0bb617c97f926066.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <boost/gil/rgb.hpp>
#include <boost/gil/extension/io/png_dynamic_io.hpp>
#include <stdint.h>
#include <math.h>
#include <iostream>
#include <vector>
#include "canny_p.h"
using namespace boost::gil;
using namespace std;
/*create a gaussian filter*/
/*
 * Build a normalized 2-D Gaussian kernel of size height x width.
 *
 * BUG FIX: the original evaluated exp(-(i*i+j*j)/(2*sigma^2)) with i,j
 * starting at 0, producing a quarter-Gaussian anchored at the top-left
 * corner. A Gaussian filter must be centered, so the exponent now uses
 * offsets from the kernel midpoint. The weights are normalized to sum to 1
 * so the blur preserves overall image brightness.
 *
 * NOTE(review): Matrix/Array come from canny_p.h; if they are std::vector
 * style containers, constructing them inside a __device__ function will not
 * compile for device code — confirm their definitions.
 */
__device__
Matrix createKernel(int height, int width, double sigma)
{
    Matrix kernel(height, Array(width));
    double sum = 0.0;
    const double cy = (height - 1) / 2.0;  // kernel center, row axis
    const double cx = (width - 1) / 2.0;   // kernel center, column axis
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            const double dy = i - cy;
            const double dx = j - cx;
            kernel[i][j] = exp(-(dx * dx + dy * dy) / (2 * sigma * sigma))
                           / (2 * M_PI * sigma * sigma);
            sum += kernel[i][j];
        }
    }
    /* Normalize so the weights sum to exactly 1. */
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            kernel[i][j] /= sum;
        }
    }
    return kernel;
}
/*Step 1 blur the image to reduce noice*/
/* Convolve the input image with a 5x5 Gaussian (sigma = 10) to reduce noise
 * before edge detection; one thread computes one output pixel (i, j) from
 * its own threadIdx, accumulating filter-weighted input samples.
 *
 * NOTE(review): this kernel looks incomplete (see the commented-out loops):
 *  - there is no guard against i >= newImageHeight / j >= newImageWidth, so
 *    any launch whose block exceeds the valid output region reads and writes
 *    out of bounds; only blockDim <= (newImageHeight, newImageWidth) with a
 *    single block appears safe;
 *  - newImage is accumulated with '+', so it must be zero-initialized by the
 *    caller — confirm;
 *  - every thread rebuilds the same filter via createKernel, and the final
 *    __syncthreads() guards nothing; both look like leftovers.
 */
__global__
void gaussian_filter(gray8_pixel_t **newImage,gray8_pixel_t **in_pixels,int width, int height)
{
    Matrix filter = createKernel(5, 5, 10.0);
    int filterHeight = filter.size();
    int filterWidth = filter[0].size();
    int newImageHeight = height-filterHeight;
    int newImageWidth = width-filterWidth;
    int h,w;
    /*allocate newimage*/
    int i = threadIdx.x;
    int j = threadIdx.y;
    //
    // for (i=0 ; i<newImageHeight ; i++) {
    // for (j=0 ; j<newImageWidth ; j++) {
    for (h=i ; h<i+filterHeight ; h++) {
        for (w=j ; w<j+filterWidth ; w++) {
            newImage[i][j] = newImage[i][j] +filter[h-i][w-j]*in_pixels[h][w];
        }
    }
    __syncthreads();
    // }
    // }
    //__syncthreads();
}
/*
 * Compute per-pixel gradients and gradient magnitude.
 * deltaX / deltaY receive the horizontal / vertical central differences
 * (one-sided differences at the image borders); newImage receives the
 * rounded Euclidean magnitude sqrt(dx^2 + dy^2).
 *
 * BUG FIX: the deltaY and magnitude loops iterated i over `width` and j over
 * `height` while still indexing [i][j] as [row][column] and boundary-testing
 * `i == height - 1` — out-of-bounds reads/writes for any non-square image.
 * All three loop nests now run i over rows (height) and j over columns
 * (width) consistently.
 *
 * NOTE(review): this __global__ kernel processes the whole image serially in
 * every thread; it only makes sense launched <<<1,1>>> until parallelized.
 */
__global__
void gradient(gray8_pixel_t **newImage, gray8_pixel_t **in_pixels, int width, int height,
                gray8_pixel_t **deltaX, gray8_pixel_t **deltaY)
{
    // deltaX = f(x+1) - f(x-1), clamped to one-sided differences at edges
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++){
            if (j == 0) deltaX[i][j] = in_pixels[i][j + 1] - in_pixels[i][j];
            else if (j == width - 1) deltaX[i][j] = in_pixels[i][j] - in_pixels[i][j - 1];
            else deltaX[i][j] = in_pixels[i][j + 1] - in_pixels[i][j - 1];
        }
    }
    // deltaY = f(y+1) - f(y-1), same edge handling along the row axis
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            if (i == 0) deltaY[i][j] = in_pixels[i+1][j] - in_pixels[i][j];
            else if (i == height - 1) deltaY[i][j] = in_pixels[i][j] - in_pixels[i-1][j];
            else deltaY[i][j] = in_pixels[i+1][j] - in_pixels[i-1][j];
        }
    }
    // magnitude = round(sqrt(dx^2 + dy^2))
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            newImage[i][j] = (gray8_pixel_t)(sqrt((double)deltaX[i][j] * deltaX[i][j] +
                                                  (double)deltaY[i][j] * deltaY[i][j]) + 0.5);
        }
    }
}
/* Non-maximum suppression: keep a pixel's gradient magnitude only if it is a
 * local maximum along the (interpolated) gradient direction; otherwise zero
 * it. mag1/mag2 are the magnitudes interpolated at the two points where the
 * gradient line crosses the neighboring pixel ring, with alpha the
 * interpolation weight derived from the dx/dy ratio in each octant.
 *
 * NOTE(review):
 *  - the border-zeroing loop runs k over `height` but also writes
 *    newImage[k][width - 1] and newImage[0..height-1][k] — mixed bounds that
 *    are wrong for non-square images;
 *  - `unsigned i < height - 1` relies on height >= 2 (height - 1 underflows
 *    the comparison for height <= 1 after the usual conversions) — confirm
 *    callers guarantee this;
 *  - the per-octant neighbor choices should be cross-checked against a
 *    reference Canny implementation.
 */
__global__
void suppress(gray8_pixel_t **newImage, gray8_pixel_t **mag, int width, int height,
                gray8_pixel_t **deltaX, gray8_pixel_t **deltaY)
{
    float alpha;
    float mag1, mag2;
    // put zero all boundaries of image
    // TOP edge line of the image
    for (int k = 0; k < height; ++k) {
        newImage[height - 1][k] = 0;
        newImage[0][k] = 0;
        newImage[k][0] = 0;
        newImage[k][width - 1]= 0;
    }
    // skip boundaries of image
    // start and stop 1 pixel inner pixels from boundaries
    for(unsigned i = 1; i < height-1; i++)
    {
        for(unsigned j = 1; j < width-1; j++)
        {
            // if magnitude = 0, no edge
            if(mag[i][j] == 0) newImage[i][j] = 0;//suppressed
            else{
                if(deltaX[i][j] >= 0)
                {
                    if(deltaY[i][j] >= 0) // dx >= 0, dy >= 0
                    {
                        if((deltaX[i][j] - deltaY[i][j]) >= 0) // direction 1 (SEE, South-East-East)
                        {
                            alpha = (float)deltaY[i][j] / deltaX[i][j];
                            mag1 = (1-alpha)*mag[i][j+1] + alpha*mag[i+1][j+1];
                            mag2 = (1-alpha)*mag[i][j-1] + alpha*mag[i-1][j-1];
                        }
                        else // direction 2 (SSE)
                        {
                            alpha = (float)deltaX[i][j] / deltaY[i][j];
                            mag1 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j+1];
                            mag2 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j-1];
                        }
                    }
                    else // dx >= 0, dy < 0
                    {
                        if((deltaX[i][j] + deltaY[i][j]) >= 0) // direction 8 (NEE)
                        {
                            alpha = (float)-deltaY[i][j] / deltaX[i][j];
                            mag1 = (1-alpha)*mag[i][j+1] + alpha*mag[i-1][j+1];
                            mag2 = (1-alpha)*mag[i][j-1] + alpha*mag[i+1][j-1];
                        }
                        else // direction 7 (NNE)
                        {
                            alpha = (float)deltaX[i][j] / -deltaY[i][j];
                            mag1 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j-1];
                            mag2 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j+1];
                        }
                    }
                }
                else
                {
                    if(deltaY[i][j] >= 0) // dx < 0, dy >= 0
                    {
                        if((deltaX[i][j] - deltaY[i][j]) >= 0) // direction 3 (SSW)
                        {
                            alpha = (float)-deltaX[i][j] / deltaY[i][j];
                            mag1 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j-1];
                            mag2 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j+1];
                        }
                        else // direction 4 (SWW)
                        {
                            alpha = (float)deltaY[i][j] / -deltaX[i][j];
                            mag1 = (1-alpha)*mag[i][j-1] + alpha*mag[i+1][j-1];
                            mag2 = (1-alpha)*mag[i][j+1] + alpha*mag[i-1][j+1];
                        }
                    }
                    else // dx < 0, dy < 0
                    {
                        if((-deltaX[i][j] + deltaY[i][j]) >= 0) // direction 5 (NWW)
                        {
                            alpha = (float)deltaY[i][j] / deltaX[i][j];
                            mag1 = (1-alpha)*mag[i][j-1] + alpha*mag[i-1][j-1];
                            mag2 = (1-alpha)*mag[i][j+1] + alpha*mag[i+1][j+1];
                        }
                        else // direction 6 (NNW)
                        {
                            alpha = (float)deltaX[i][j] / deltaY[i][j];
                            mag1 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j-1];
                            mag2 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j+1];
                        }
                    }
                }
                // non-maximal suppression
                // compare mag1, mag2 and mag[t]
                // if mag[t] is smaller than one of the neighbours then suppress it
                if((mag[i][j] < mag1) || (mag[i][j] < mag2))
                    newImage[i][j] = 0;//suppressed
                else
                {
                    newImage[i][j] = mag[i][j];
                }
            }
        }
    }
}
__global__
void apply_hysteresis(gray8_pixel_t **out_pixels, gray8_pixel_t **in_pixels, unsigned char t_high, unsigned char t_low, int width,int height)
{
/* skip first and last rows and columns, since we'll check them as surrounding neighbors of
* the adjacent rows and columns */
for(unsigned i = 1; i < height-1; i++) {
for(unsigned j = 1; j < width-1; j++) {
/* if our input is above the high threshold and the output hasn't already marked it as an edge */
if (out_pixels[i][j] != 0xFF) {
if (in_pixels[i][j] > t_high) {
/* mark as strong edge */
out_pixels[i][j] = 0xff;
/* check 8 immediately surrounding neighbors
* if any of the neighbors are above the low threshold, preserve edge */
trace_immed_neighbors(out_pixels, in_pixels, i,j, t_low);
} else {
out_pixels[i][j] = 0x00;
}
}
}
}
}
__device__
void trace_immed_neighbors(gray8_pixel_t **out_pixels, gray8_pixel_t **in_pixels, unsigned i, unsigned j, unsigned char t_low)
{
unsigned char m_edge= 255;
/* directions representing indices of neighbors */
if (((unsigned char)in_pixels[i-1][j-1] >= t_low) && ((unsigned char)out_pixels[i-1][j-1] != m_edge)) {
out_pixels[i-1][j-1] = m_edge;
}
if (((unsigned char)in_pixels[i-1][j] >= t_low) && ((unsigned char)out_pixels[i-1][j] != m_edge)) {
out_pixels[i-1][j] = m_edge;
}
if (((unsigned char)in_pixels[i-1][j+1] >= t_low) && ((unsigned char)out_pixels[i-1][j+1] != m_edge)) {
out_pixels[i-1][j+1] = m_edge;
}
if (((unsigned char)in_pixels[i][j-1] >= t_low) && ((unsigned char)out_pixels[i][j-1] != m_edge)) {
out_pixels[i][j-1] = m_edge;
}
if (((unsigned char)in_pixels[i][j+1] >= t_low) && ((unsigned char)out_pixels[i][j+1] != m_edge)) {
out_pixels[i][j+1] = m_edge;
}
if (((unsigned char)in_pixels[i+1][j-1] >= t_low) && ((unsigned char)out_pixels[i+1][j-1] != m_edge)) {
out_pixels[i+1][j-1] = m_edge;
}
if (((unsigned char)in_pixels[i+1][j] >= t_low) && ((unsigned char)out_pixels[i+1][j] != m_edge)) {
out_pixels[i+1][j] = m_edge;
}
if (((unsigned char)in_pixels[i+1][j+1] >= t_low) && ((unsigned char)out_pixels[i+1][j+1] != m_edge)) {
out_pixels[i+1][j+1] = m_edge;
}
}
| 9d6ae35232c056f698ed3e4c0bb617c97f926066.cu | #include <boost/gil/rgb.hpp>
#include <boost/gil/extension/io/png_dynamic_io.hpp>
#include <stdint.h>
#include <math.h>
#include <iostream>
#include <vector>
#include "canny_p.h"
using namespace boost::gil;
using namespace std;
/*create a gaussian filter*/
__device__
Matrix createKernel(int height, int width, double sigma)
{
Matrix kernel(height, Array(width));
double sum=0.0;
int i,j;
for (i=0 ; i<height ; i++) {
for (j=0 ; j<width ; j++) {
kernel[i][j] = exp(-(i*i+j*j)/(2*sigma*sigma))/(2*M_PI*sigma*sigma);
sum += kernel[i][j];
}
}
for (i=0 ; i<height ; i++) {
for (j=0 ; j<width ; j++) {
kernel[i][j] /= sum;
}
}
return kernel;
}
/*Step 1 blur the image to reduce noice*/
__global__
void gaussian_filter(gray8_pixel_t **newImage,gray8_pixel_t **in_pixels,int width, int height)
{
Matrix filter = createKernel(5, 5, 10.0);
int filterHeight = filter.size();
int filterWidth = filter[0].size();
int newImageHeight = height-filterHeight;
int newImageWidth = width-filterWidth;
int h,w;
/*allocate newimage*/
int i = threadIdx.x;
int j = threadIdx.y;
//
// for (i=0 ; i<newImageHeight ; i++) {
// for (j=0 ; j<newImageWidth ; j++) {
for (h=i ; h<i+filterHeight ; h++) {
for (w=j ; w<j+filterWidth ; w++) {
newImage[i][j] = newImage[i][j] +filter[h-i][w-j]*in_pixels[h][w];
}
}
__syncthreads();
// }
// }
//__syncthreads();
}
__global__
void gradient(gray8_pixel_t **newImage, gray8_pixel_t **in_pixels, int width, int height,
gray8_pixel_t **deltaX, gray8_pixel_t **deltaY)
{
// compute delta X ***************************
// deltaX = f(x+1) - f(x-1)
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++){
if (j == 0) deltaX[i][j] = in_pixels[i][j + 1] - in_pixels[i][j];
else if (j == width - 1)deltaX[i][j] = in_pixels[i][j] - in_pixels[i][j - 1];
else deltaX[i][j] = in_pixels[i][j + 1] - in_pixels[i][j - 1];
}
}
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
if (i == 0) deltaY[i][j] = in_pixels[i+1][j] - in_pixels[i][j];
else if (i == height - 1)deltaY[i][j] = in_pixels[i][j] - in_pixels[i-1][j];
else deltaY[i][j] = in_pixels[i+1][j] - in_pixels[i-1][j];
}
}
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
newImage[i][j] = (gray8_pixel_t)(sqrt((double)deltaX[i][j] * deltaX[i][j] +
(double)deltaY[i][j] * deltaY[i][j]) + 0.5);
}
}
}
__global__
void suppress(gray8_pixel_t **newImage, gray8_pixel_t **mag, int width, int height,
gray8_pixel_t **deltaX, gray8_pixel_t **deltaY)
{
float alpha;
float mag1, mag2;
// put zero all boundaries of image
// TOP edge line of the image
for (int k = 0; k < height; ++k) {
newImage[height - 1][k] = 0;
newImage[0][k] = 0;
newImage[k][0] = 0;
newImage[k][width - 1]= 0;
}
// skip boundaries of image
// start and stop 1 pixel inner pixels from boundaries
for(unsigned i = 1; i < height-1; i++)
{
for(unsigned j = 1; j < width-1; j++)
{
// if magnitude = 0, no edge
if(mag[i][j] == 0) newImage[i][j] = 0;//suppressed
else{
if(deltaX[i][j] >= 0)
{
if(deltaY[i][j] >= 0) // dx >= 0, dy >= 0
{
if((deltaX[i][j] - deltaY[i][j]) >= 0) // direction 1 (SEE, South-East-East)
{
alpha = (float)deltaY[i][j] / deltaX[i][j];
mag1 = (1-alpha)*mag[i][j+1] + alpha*mag[i+1][j+1];
mag2 = (1-alpha)*mag[i][j-1] + alpha*mag[i-1][j-1];
}
else // direction 2 (SSE)
{
alpha = (float)deltaX[i][j] / deltaY[i][j];
mag1 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j+1];
mag2 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j-1];
}
}
else // dx >= 0, dy < 0
{
if((deltaX[i][j] + deltaY[i][j]) >= 0) // direction 8 (NEE)
{
alpha = (float)-deltaY[i][j] / deltaX[i][j];
mag1 = (1-alpha)*mag[i][j+1] + alpha*mag[i-1][j+1];
mag2 = (1-alpha)*mag[i][j-1] + alpha*mag[i+1][j-1];
}
else // direction 7 (NNE)
{
alpha = (float)deltaX[i][j] / -deltaY[i][j];
mag1 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j-1];
mag2 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j+1];
}
}
}
else
{
if(deltaY[i][j] >= 0) // dx < 0, dy >= 0
{
if((deltaX[i][j] - deltaY[i][j]) >= 0) // direction 3 (SSW)
{
alpha = (float)-deltaX[i][j] / deltaY[i][j];
mag1 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j-1];
mag2 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j+1];
}
else // direction 4 (SWW)
{
alpha = (float)deltaY[i][j] / -deltaX[i][j];
mag1 = (1-alpha)*mag[i][j-1] + alpha*mag[i+1][j-1];
mag2 = (1-alpha)*mag[i][j+1] + alpha*mag[i-1][j+1];
}
}
else // dx < 0, dy < 0
{
if((-deltaX[i][j] + deltaY[i][j]) >= 0) // direction 5 (NWW)
{
alpha = (float)deltaY[i][j] / deltaX[i][j];
mag1 = (1-alpha)*mag[i][j-1] + alpha*mag[i-1][j-1];
mag2 = (1-alpha)*mag[i][j+1] + alpha*mag[i+1][j+1];
}
else // direction 6 (NNW)
{
alpha = (float)deltaX[i][j] / deltaY[i][j];
mag1 = (1-alpha)*mag[i-1][j] + alpha*mag[i-1][j-1];
mag2 = (1-alpha)*mag[i+1][j] + alpha*mag[i+1][j+1];
}
}
}
// non-maximal suppression
// compare mag1, mag2 and mag[t]
// if mag[t] is smaller than one of the neighbours then suppress it
if((mag[i][j] < mag1) || (mag[i][j] < mag2))
newImage[i][j] = 0;//SUPRRESSED
else
{
newImage[i][j] = mag[i][j];
}
}
}
}
}
__global__
void apply_hysteresis(gray8_pixel_t **out_pixels, gray8_pixel_t **in_pixels, unsigned char t_high, unsigned char t_low, int width,int height)
{
/* skip first and last rows and columns, since we'll check them as surrounding neighbors of
* the adjacent rows and columns */
for(unsigned i = 1; i < height-1; i++) {
for(unsigned j = 1; j < width-1; j++) {
/* if our input is above the high threshold and the output hasn't already marked it as an edge */
if (out_pixels[i][j] != 0xFF) {
if (in_pixels[i][j] > t_high) {
/* mark as strong edge */
out_pixels[i][j] = 0xff;
/* check 8 immediately surrounding neighbors
* if any of the neighbors are above the low threshold, preserve edge */
trace_immed_neighbors(out_pixels, in_pixels, i,j, t_low);
} else {
out_pixels[i][j] = 0x00;
}
}
}
}
}
__device__
void trace_immed_neighbors(gray8_pixel_t **out_pixels, gray8_pixel_t **in_pixels, unsigned i, unsigned j, unsigned char t_low)
{
unsigned char m_edge= 255;
/* directions representing indices of neighbors */
if (((unsigned char)in_pixels[i-1][j-1] >= t_low) && ((unsigned char)out_pixels[i-1][j-1] != m_edge)) {
out_pixels[i-1][j-1] = m_edge;
}
if (((unsigned char)in_pixels[i-1][j] >= t_low) && ((unsigned char)out_pixels[i-1][j] != m_edge)) {
out_pixels[i-1][j] = m_edge;
}
if (((unsigned char)in_pixels[i-1][j+1] >= t_low) && ((unsigned char)out_pixels[i-1][j+1] != m_edge)) {
out_pixels[i-1][j+1] = m_edge;
}
if (((unsigned char)in_pixels[i][j-1] >= t_low) && ((unsigned char)out_pixels[i][j-1] != m_edge)) {
out_pixels[i][j-1] = m_edge;
}
if (((unsigned char)in_pixels[i][j+1] >= t_low) && ((unsigned char)out_pixels[i][j+1] != m_edge)) {
out_pixels[i][j+1] = m_edge;
}
if (((unsigned char)in_pixels[i+1][j-1] >= t_low) && ((unsigned char)out_pixels[i+1][j-1] != m_edge)) {
out_pixels[i+1][j-1] = m_edge;
}
if (((unsigned char)in_pixels[i+1][j] >= t_low) && ((unsigned char)out_pixels[i+1][j] != m_edge)) {
out_pixels[i+1][j] = m_edge;
}
if (((unsigned char)in_pixels[i+1][j+1] >= t_low) && ((unsigned char)out_pixels[i+1][j+1] != m_edge)) {
out_pixels[i+1][j+1] = m_edge;
}
}
|
0f1215e1336eb4e1528860f37eb6c27bc9699b70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "3D_AB.cuh"
#include "constants.cuh"
#include <iostream>
#include <vector>
#include <iterator>
#include <fstream>
#include <cstdlib>
F3DAB::F3DAB( uint _ps, std::string _seq ):Benchmarks()
{
size_t protein_length = findSequence(_seq);
if( protein_length == 0 ){
std::cout << "Protein sequence not found on 3D_AB.cu at line 15." << std::endl;
exit(EXIT_FAILURE);
}
// number of individuals
ps = _ps;
min = -3.1415926535897932384626433832795029;
max = +3.1415926535897932384626433832795029;
ID = 1001;
// get the next multiple of 32;
NT.x = 32 * ceil((double) protein_length / 32.0);
//one block per population member
NB.x = ps;
// printf("nb: %d e nt: %d\n", n_blocks, n_threads);
char s_2dab[150];
memset(s_2dab, 0, sizeof(char) * 150);
strcpy(s_2dab, getSequence(_seq).c_str());
// printf("Optimizing sequence: %s\n", s_2dab);
checkCudaErrors(hipMemcpyToSymbol(S_AB, (void *) s_2dab, 150 * sizeof(char)));
checkCudaErrors(hipMemcpyToSymbol(PL, &protein_length, sizeof(int)));
}
F3DAB::~F3DAB()
{
/* empty */
}
inline __host__ __device__ float3 operator-(float3 a, float3 b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
__global__ void computeK_3DAB_P(float * x, float * f){
uint id_p = blockIdx.x;
uint id_d = threadIdx.x;
uint ndim = params.n_dim;
int N = PL;
uint THETA = id_p * ndim;
uint BETA = id_p * ndim + (N-2);
__shared__ float3 points[128];
if( id_d == 0 ){
points[0] = make_float3(0.0f, 0.0f, 0.0f);
points[1] = make_float3(0.0f, 1.0f, 0.0f);
points[2] = make_float3(cosf(x[THETA]), 1.0 + sinf(x[THETA]), 0.0f);
float3 aux = points[2];
for( uint16_t i = 3; i < N; i++ ){
aux.x += cosf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.y += sinf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.z += sinf(x[BETA + i - 3]);
points[i] = aux;
}
}
__shared__ float v1[128], v2[128];
v1[id_d] = 0.0;
v2[id_d] = 0.0;
__syncthreads();
// if( id_d == 0 ){
// printf("Pontos: \n");
// for( uint16_t i = 0; i < N; i++ ){
// printf("%.3f %.3f %.3f\n", points[i].x, points[i].y, points[i].z);
// }
// }
float C, n3df, _v2;
if( id_d < (N - 2) ){
v1[id_d] = (1.0f - cosf(x[THETA + id_d]));
float3 P1 = points[id_d];
_v2 = 0.0;
for( uint16_t j = (id_d + 2); j < N; j++ ){
if( S_AB[id_d] == 'A' && S_AB[j] == 'A' )
C = 1.0;
else if( S_AB[id_d] == 'B' && S_AB[j] == 'B' )
C = 0.5;
else
C = -0.5;
float3 D = P1 - points[j];
n3df = norm3df(D.x, D.y, D.z);
_v2 += ( 1.0 / powf(n3df, 12.0) - C / powf(n3df, 6.0) );
}
v2[id_d] = _v2;
}
__syncthreads();
if( id_d < 64 && N > 64 ){
v1[id_d] += v1[id_d + 64];
v2[id_d] += v2[id_d + 64];
}
__syncthreads();
if( id_d < 32 && N > 32 ){
v1[id_d] += v1[id_d + 32];
v2[id_d] += v2[id_d + 32];
}
__syncthreads();
if( id_d < 16 && N > 16 ){
v1[id_d] += v1[id_d + 16];
v2[id_d] += v2[id_d + 16];
}
__syncthreads();
if( id_d < 8 ){
v1[id_d] += v1[id_d + 8];
v2[id_d] += v2[id_d + 8];
}
__syncthreads();
if( id_d < 4 ){
v1[id_d] += v1[id_d + 4];
v2[id_d] += v2[id_d + 4];
}
__syncthreads();
if( id_d < 2 ){
v1[id_d] += v1[id_d + 2];
v2[id_d] += v2[id_d + 2];
}
__syncthreads();
if( id_d == 0 ){
v1[id_d] += v1[id_d + 1];
v2[id_d] += v2[id_d + 1];
f[id_p] = (v1[0] / 4.0) + (v2[0] * 4.0);
// printf("v1: %.4lf v2: %.4lf\n", v1[0]/4, 4*v2[0]);
// printf("Final energy value: %.8lf\n", v1[0]/4 + 4*v2[0]);
}
}
__global__ void computeK_3DAB_S(float *x, float *f){
uint id_p = threadIdx.x + (blockIdx.x * blockDim.x);
uint ps = params.ps;
uint ndim = params.n_dim;
int N = PL;
if( id_p < ps ){
uint THETA = id_p * ndim;
uint BETA = id_p * ndim + (N-2);
float3 points[128];
points[0] = make_float3(0.0f, 0.0f, 0.0f);
points[1] = make_float3(0.0f, 1.0f, 0.0f);
points[2] = make_float3(cosf(x[THETA + 0]), 1 + sinf(x[THETA + 0]), 0.0f);
float3 aux = points[2];
for( uint16_t i = 3; i < N; i++ ){
aux.x += cosf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.y += sinf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.z += sinf(x[BETA + i - 3]);
points[i] = aux;
}
__syncthreads();
// printf("Pontos: \n");
// for( uint16_t i = 0; i < N; i++ ){
// printf("%.3f %.3f %.3f\n", points[i].x, points[i].y, points[i].z);
// }
float v1 = 0.0, v2 = 0.0, C, n3df;
for( uint16_t i = 0; i < N-2; i++ ){
v1 += (1.0f - cosf(x[THETA + i]));
float3 P1 = points[i];
for( uint16_t j = i + 2; j < N; j++ ){
if( S_AB[i] == 'A' && S_AB[j] == 'A' ){
C = 1;
} else if( S_AB[i] == 'B' && S_AB[j] == 'B' ){
C = 0.5;
} else {
C = -0.5;
}
float3 D = P1 - points[j];
n3df = norm3df(D.x, D.y, D.z);
v2 += ( 1.0f / powf(n3df, 12.0f) - C / powf(n3df, 6.0f) );
}
}
// printf("v1: %.4f v2: %.4f\n", v1/4, 4*v2);
// printf("Final energy value: %.8lf\n", v1/4 + 4*v2);
f[id_p] = (v1 / 4.0) + (4.0 * v2);
}
}
void F3DAB::compute(float * x, float * f){
hipLaunchKernelGGL(( computeK_3DAB_P), dim3(NB), dim3(NT) , 0, 0, x, f);
checkCudaErrors(hipGetLastError());
}
| 0f1215e1336eb4e1528860f37eb6c27bc9699b70.cu | #include "3D_AB.cuh"
#include "constants.cuh"
#include <iostream>
#include <vector>
#include <iterator>
#include <fstream>
#include <cstdlib>
F3DAB::F3DAB( uint _ps, std::string _seq ):Benchmarks()
{
size_t protein_length = findSequence(_seq);
if( protein_length == 0 ){
std::cout << "Protein sequence not found on 3D_AB.cu at line 15." << std::endl;
exit(EXIT_FAILURE);
}
// number of individuals
ps = _ps;
min = -3.1415926535897932384626433832795029;
max = +3.1415926535897932384626433832795029;
ID = 1001;
// get the next multiple of 32;
NT.x = 32 * ceil((double) protein_length / 32.0);
//one block per population member
NB.x = ps;
// printf("nb: %d e nt: %d\n", n_blocks, n_threads);
char s_2dab[150];
memset(s_2dab, 0, sizeof(char) * 150);
strcpy(s_2dab, getSequence(_seq).c_str());
// printf("Optimizing sequence: %s\n", s_2dab);
checkCudaErrors(cudaMemcpyToSymbol(S_AB, (void *) s_2dab, 150 * sizeof(char)));
checkCudaErrors(cudaMemcpyToSymbol(PL, &protein_length, sizeof(int)));
}
F3DAB::~F3DAB()
{
/* empty */
}
inline __host__ __device__ float3 operator-(float3 a, float3 b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
__global__ void computeK_3DAB_P(float * x, float * f){
uint id_p = blockIdx.x;
uint id_d = threadIdx.x;
uint ndim = params.n_dim;
int N = PL;
uint THETA = id_p * ndim;
uint BETA = id_p * ndim + (N-2);
__shared__ float3 points[128];
if( id_d == 0 ){
points[0] = make_float3(0.0f, 0.0f, 0.0f);
points[1] = make_float3(0.0f, 1.0f, 0.0f);
points[2] = make_float3(cosf(x[THETA]), 1.0 + sinf(x[THETA]), 0.0f);
float3 aux = points[2];
for( uint16_t i = 3; i < N; i++ ){
aux.x += cosf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.y += sinf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.z += sinf(x[BETA + i - 3]);
points[i] = aux;
}
}
__shared__ float v1[128], v2[128];
v1[id_d] = 0.0;
v2[id_d] = 0.0;
__syncthreads();
// if( id_d == 0 ){
// printf("Pontos: \n");
// for( uint16_t i = 0; i < N; i++ ){
// printf("%.3f %.3f %.3f\n", points[i].x, points[i].y, points[i].z);
// }
// }
float C, n3df, _v2;
if( id_d < (N - 2) ){
v1[id_d] = (1.0f - cosf(x[THETA + id_d]));
float3 P1 = points[id_d];
_v2 = 0.0;
for( uint16_t j = (id_d + 2); j < N; j++ ){
if( S_AB[id_d] == 'A' && S_AB[j] == 'A' )
C = 1.0;
else if( S_AB[id_d] == 'B' && S_AB[j] == 'B' )
C = 0.5;
else
C = -0.5;
float3 D = P1 - points[j];
n3df = norm3df(D.x, D.y, D.z);
_v2 += ( 1.0 / powf(n3df, 12.0) - C / powf(n3df, 6.0) );
}
v2[id_d] = _v2;
}
__syncthreads();
if( id_d < 64 && N > 64 ){
v1[id_d] += v1[id_d + 64];
v2[id_d] += v2[id_d + 64];
}
__syncthreads();
if( id_d < 32 && N > 32 ){
v1[id_d] += v1[id_d + 32];
v2[id_d] += v2[id_d + 32];
}
__syncthreads();
if( id_d < 16 && N > 16 ){
v1[id_d] += v1[id_d + 16];
v2[id_d] += v2[id_d + 16];
}
__syncthreads();
if( id_d < 8 ){
v1[id_d] += v1[id_d + 8];
v2[id_d] += v2[id_d + 8];
}
__syncthreads();
if( id_d < 4 ){
v1[id_d] += v1[id_d + 4];
v2[id_d] += v2[id_d + 4];
}
__syncthreads();
if( id_d < 2 ){
v1[id_d] += v1[id_d + 2];
v2[id_d] += v2[id_d + 2];
}
__syncthreads();
if( id_d == 0 ){
v1[id_d] += v1[id_d + 1];
v2[id_d] += v2[id_d + 1];
f[id_p] = (v1[0] / 4.0) + (v2[0] * 4.0);
// printf("v1: %.4lf v2: %.4lf\n", v1[0]/4, 4*v2[0]);
// printf("Final energy value: %.8lf\n", v1[0]/4 + 4*v2[0]);
}
}
__global__ void computeK_3DAB_S(float *x, float *f){
uint id_p = threadIdx.x + (blockIdx.x * blockDim.x);
uint ps = params.ps;
uint ndim = params.n_dim;
int N = PL;
if( id_p < ps ){
uint THETA = id_p * ndim;
uint BETA = id_p * ndim + (N-2);
float3 points[128];
points[0] = make_float3(0.0f, 0.0f, 0.0f);
points[1] = make_float3(0.0f, 1.0f, 0.0f);
points[2] = make_float3(cosf(x[THETA + 0]), 1 + sinf(x[THETA + 0]), 0.0f);
float3 aux = points[2];
for( uint16_t i = 3; i < N; i++ ){
aux.x += cosf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.y += sinf(x[THETA + i - 2]) * cosf(x[BETA + i - 3]);
aux.z += sinf(x[BETA + i - 3]);
points[i] = aux;
}
__syncthreads();
// printf("Pontos: \n");
// for( uint16_t i = 0; i < N; i++ ){
// printf("%.3f %.3f %.3f\n", points[i].x, points[i].y, points[i].z);
// }
float v1 = 0.0, v2 = 0.0, C, n3df;
for( uint16_t i = 0; i < N-2; i++ ){
v1 += (1.0f - cosf(x[THETA + i]));
float3 P1 = points[i];
for( uint16_t j = i + 2; j < N; j++ ){
if( S_AB[i] == 'A' && S_AB[j] == 'A' ){
C = 1;
} else if( S_AB[i] == 'B' && S_AB[j] == 'B' ){
C = 0.5;
} else {
C = -0.5;
}
float3 D = P1 - points[j];
n3df = norm3df(D.x, D.y, D.z);
v2 += ( 1.0f / powf(n3df, 12.0f) - C / powf(n3df, 6.0f) );
}
}
// printf("v1: %.4f v2: %.4f\n", v1/4, 4*v2);
// printf("Final energy value: %.8lf\n", v1/4 + 4*v2);
f[id_p] = (v1 / 4.0) + (4.0 * v2);
}
}
void F3DAB::compute(float * x, float * f){
computeK_3DAB_P<<< NB, NT >>>(x, f);
checkCudaErrors(cudaGetLastError());
}
|
55374989d482c077ee14649d568e913916ea20ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#define GRID_SIZE 32
#define BLOCK_SIZE 512
#define NUM_TRY 10000
/**
* (x,y)devResults
*/
__global__
void compute_pi(float* devResults, float* devRandom){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step = gridDim.x * blockDim.x * 2;
int count = 0;
for (int iter = 0; iter < NUM_TRY; ++iter) {
// (x,y)
float x = devRandom[iter * step + idx * 2];
float y = devRandom[iter * step + idx * 2 + 1];
//
if (x * x + y * y <= 1) {
count++;
}
}
devResults[idx] = (float)count / NUM_TRY;
}
int main()
{
float* results;
float* devResults;
hiprandGenerator_t gen;
float *devRandom;
// CPU
results = new float[GRID_SIZE * BLOCK_SIZE];
// GPU
hipMalloc((void**)&devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE);
hipMalloc((void**)&devRandom, sizeof(float) * GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2);
//
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
//
hiprandGenerateUniform(gen, devRandom, GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2);
// GPU
hipLaunchKernelGGL(( compute_pi), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, devResults, devRandom);
// sizeGPUd_bufferCPUbuffer
hipMemcpy(results, devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE, hipMemcpyDeviceToHost);
// GPU
hipFree(devResults);
hipFree(devRandom);
//
float count = 0.0;
for (int i = 0; i < GRID_SIZE * BLOCK_SIZE; ++i) {
count += results[i];
}
printf("PI: %lf\n", count * 4.0 / GRID_SIZE / BLOCK_SIZE);
// CPU
free(results);
hipDeviceReset();
}
| 55374989d482c077ee14649d568e913916ea20ef.cu | #include <stdio.h>
#include <curand.h>
#define GRID_SIZE 32
#define BLOCK_SIZE 512
#define NUM_TRY 10000
/**
* 乱数に基づいて(x,y)を生成し、円の中に入る確率を計算し、devResultsに格納する。
*/
__global__
void compute_pi(float* devResults, float* devRandom){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step = gridDim.x * blockDim.x * 2;
int count = 0;
for (int iter = 0; iter < NUM_TRY; ++iter) {
// 乱数に基づいて(x,y)を生成
float x = devRandom[iter * step + idx * 2];
float y = devRandom[iter * step + idx * 2 + 1];
// 円の中に入っているかチェック
if (x * x + y * y <= 1) {
count++;
}
}
devResults[idx] = (float)count / NUM_TRY;
}
int main()
{
float* results;
float* devResults;
curandGenerator_t gen;
float *devRandom;
// CPU側でメモリを確保する
results = new float[GRID_SIZE * BLOCK_SIZE];
// GPU側でメモリを確保する
cudaMalloc((void**)&devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE);
cudaMalloc((void**)&devRandom, sizeof(float) * GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2);
// 乱数生成器を作成
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
// 乱数を生成し、デバイス側のバッファに格納する
curandGenerateUniform(gen, devRandom, GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2);
// GPU側の関数を呼び出す。()内が、そのまま関数の引数となる
compute_pi<<<GRID_SIZE, BLOCK_SIZE>>>(devResults, devRandom);
// 指定したsize分、GPUのd_bufferから、CPUのbufferへ、データを転送する
cudaMemcpy(results, devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE, cudaMemcpyDeviceToHost);
// GPU側で確保したメモリを開放する
cudaFree(devResults);
cudaFree(devRandom);
// 結果を表示する
float count = 0.0;
for (int i = 0; i < GRID_SIZE * BLOCK_SIZE; ++i) {
count += results[i];
}
printf("PI: %lf\n", count * 4.0 / GRID_SIZE / BLOCK_SIZE);
// CPU側で確保したメモリを開放する
free(results);
cudaDeviceReset();
}
|
f2712688918e7f29df40e861a4874d39760faba7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../cudaGraph/algos/cudaCD.hpp"
namespace cudaGraph
{
__global__ void
computeInDegree(int* d_startIndices, int* d_endIndices, int* d_edges,
int* d_inDegree, int vertexCount)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < vertexCount)
{
int startIndex = d_startIndices[tid];
int endIndex = d_endIndices[tid];
for (int i = startIndex; i < endIndex; i++)
{
int neighbor = d_edges[i];
atomicAdd(&d_inDegree[neighbor], 1);
}
}
}
__global__ void
loadQueue(int* d_inDegree, int* d_currentQueue, int* nextQueueSize, int vertexCount)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < vertexCount && d_inDegree[tid] == 0)
{
int queuePosition = atomicAdd(nextQueueSize, 1);
d_currentQueue[queuePosition] = tid;
}
}
__global__ void
clearQueue(int* d_startIndices, int* d_endIndices, int* d_edges,
int* d_inDegree, int* d_currentQueue, int* d_nextQueue,
int* visitedCount, int* nextQueueSize, int currentQueueSize)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < currentQueueSize)
{
atomicAdd(visitedCount, 1);
int startIndex = d_startIndices[d_currentQueue[tid]];
int endIndex = d_endIndices[d_currentQueue[tid]];
for (int i = startIndex; i < endIndex; i++)
{
int neighbor = d_edges[i];
atomicSub(&d_inDegree[neighbor], 1);
if (d_inDegree[neighbor] == 0)
{
int queuePosition = atomicAdd(nextQueueSize, 1);
d_nextQueue[queuePosition] = neighbor;
}
}
}
}
bool launchHasCycle(Graph &g)
{
int vertexCount = g.vertices.size();
int edgeCount = g.edges.size();
int sizeOfVertices = vertexCount * sizeof(int);
int block = 1024;
int grid = (vertexCount / 1024) + 1;
int* d_inDegree = NULL;
int* d_currentQueue = NULL;
int* d_nextQueue = NULL;
int* visitedCount;
int* nextQueueSize;
int currentQueueSize;
checkError(hipMalloc(&d_inDegree, sizeOfVertices));
checkError(hipMalloc(&d_currentQueue, sizeOfVertices));
checkError(hipMalloc(&d_nextQueue, sizeOfVertices));
checkError(hipHostMalloc((void**) &nextQueueSize, sizeof(int)));
checkError(hipHostMalloc((void**) &visitedCount, sizeof(int)));
hipLaunchKernelGGL(( computeInDegree), dim3(grid), dim3(block), 0, 0, g.d_startIndices, g.d_endIndices, g.d_edges, d_inDegree, vertexCount);
hipDeviceSynchronize();
hipLaunchKernelGGL(( loadQueue), dim3(grid), dim3(block), 0, 0, d_inDegree, d_currentQueue, nextQueueSize, vertexCount);
hipDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
*visitedCount = 0;
while (currentQueueSize > 0)
{
grid = (currentQueueSize / 1024) + 1;
hipLaunchKernelGGL(( clearQueue), dim3(grid), dim3(block), 0, 0, g.d_startIndices, g.d_endIndices, g.d_edges,
d_inDegree, d_currentQueue, d_nextQueue,
visitedCount, nextQueueSize, currentQueueSize);
hipDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
std::swap(d_currentQueue, d_nextQueue);
}
checkError(hipFree(d_inDegree));
checkError(hipFree(d_currentQueue));
checkError(hipFree(d_nextQueue));
if (*visitedCount != vertexCount)
{
return true;
}
else
{
return false;
}
}
float launchTimedHasCycle(Graph &g)
{
hipEvent_t start, stop;
float time;
int vertexCount = g.vertices.size();
int edgeCount = g.edges.size();
int sizeOfVertices = vertexCount * sizeof(int);
int block = 1024;
int grid = (vertexCount / 1024) + 1;
int* d_inDegree = NULL;
int* d_currentQueue = NULL;
int* d_nextQueue = NULL;
int* visitedCount;
int* nextQueueSize;
int currentQueueSize;
checkError(hipEventCreate(&start));
checkError(hipEventCreate(&stop));
hipEventRecord(start);
checkError(hipMalloc(&d_inDegree, sizeOfVertices));
checkError(hipMalloc(&d_currentQueue, sizeOfVertices));
checkError(hipMalloc(&d_nextQueue, sizeOfVertices));
checkError(hipHostMalloc((void**) &nextQueueSize, sizeof(int)));
checkError(hipHostMalloc((void**) &visitedCount, sizeof(int)));
hipLaunchKernelGGL(( computeInDegree), dim3(grid), dim3(block), 0, 0, g.d_startIndices, g.d_endIndices, g.d_edges, d_inDegree, vertexCount);
hipDeviceSynchronize();
hipLaunchKernelGGL(( loadQueue), dim3(grid), dim3(block), 0, 0, d_inDegree, d_currentQueue, nextQueueSize, vertexCount);
hipDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
*visitedCount = 0;
while (currentQueueSize > 0)
{
grid = (currentQueueSize / 1024) + 1;
hipLaunchKernelGGL(( clearQueue), dim3(grid), dim3(block), 0, 0, g.d_startIndices, g.d_endIndices, g.d_edges,
d_inDegree, d_currentQueue, d_nextQueue,
visitedCount, nextQueueSize, currentQueueSize);
hipDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
std::swap(d_currentQueue, d_nextQueue);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
checkError(hipFree(d_inDegree));
checkError(hipFree(d_currentQueue));
checkError(hipFree(d_nextQueue));
return time;
}
} | f2712688918e7f29df40e861a4874d39760faba7.cu | #include "../../cudaGraph/algos/cudaCD.hpp"
namespace cudaGraph
{
__global__ void
computeInDegree(int* d_startIndices, int* d_endIndices, int* d_edges,
int* d_inDegree, int vertexCount)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < vertexCount)
{
int startIndex = d_startIndices[tid];
int endIndex = d_endIndices[tid];
for (int i = startIndex; i < endIndex; i++)
{
int neighbor = d_edges[i];
atomicAdd(&d_inDegree[neighbor], 1);
}
}
}
__global__ void
loadQueue(int* d_inDegree, int* d_currentQueue, int* nextQueueSize, int vertexCount)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < vertexCount && d_inDegree[tid] == 0)
{
int queuePosition = atomicAdd(nextQueueSize, 1);
d_currentQueue[queuePosition] = tid;
}
}
__global__ void
clearQueue(int* d_startIndices, int* d_endIndices, int* d_edges,
int* d_inDegree, int* d_currentQueue, int* d_nextQueue,
int* visitedCount, int* nextQueueSize, int currentQueueSize)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < currentQueueSize)
{
atomicAdd(visitedCount, 1);
int startIndex = d_startIndices[d_currentQueue[tid]];
int endIndex = d_endIndices[d_currentQueue[tid]];
for (int i = startIndex; i < endIndex; i++)
{
int neighbor = d_edges[i];
atomicSub(&d_inDegree[neighbor], 1);
if (d_inDegree[neighbor] == 0)
{
int queuePosition = atomicAdd(nextQueueSize, 1);
d_nextQueue[queuePosition] = neighbor;
}
}
}
}
bool launchHasCycle(Graph &g)
{
int vertexCount = g.vertices.size();
int edgeCount = g.edges.size();
int sizeOfVertices = vertexCount * sizeof(int);
int block = 1024;
int grid = (vertexCount / 1024) + 1;
int* d_inDegree = NULL;
int* d_currentQueue = NULL;
int* d_nextQueue = NULL;
int* visitedCount;
int* nextQueueSize;
int currentQueueSize;
checkError(cudaMalloc(&d_inDegree, sizeOfVertices));
checkError(cudaMalloc(&d_currentQueue, sizeOfVertices));
checkError(cudaMalloc(&d_nextQueue, sizeOfVertices));
checkError(cudaMallocHost((void**) &nextQueueSize, sizeof(int)));
checkError(cudaMallocHost((void**) &visitedCount, sizeof(int)));
computeInDegree<<<grid, block>>>(g.d_startIndices, g.d_endIndices, g.d_edges, d_inDegree, vertexCount);
cudaDeviceSynchronize();
loadQueue<<<grid, block>>>(d_inDegree, d_currentQueue, nextQueueSize, vertexCount);
cudaDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
*visitedCount = 0;
while (currentQueueSize > 0)
{
grid = (currentQueueSize / 1024) + 1;
clearQueue<<<grid, block>>>(g.d_startIndices, g.d_endIndices, g.d_edges,
d_inDegree, d_currentQueue, d_nextQueue,
visitedCount, nextQueueSize, currentQueueSize);
cudaDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
std::swap(d_currentQueue, d_nextQueue);
}
checkError(cudaFree(d_inDegree));
checkError(cudaFree(d_currentQueue));
checkError(cudaFree(d_nextQueue));
if (*visitedCount != vertexCount)
{
return true;
}
else
{
return false;
}
}
float launchTimedHasCycle(Graph &g)
{
cudaEvent_t start, stop;
float time;
int vertexCount = g.vertices.size();
int edgeCount = g.edges.size();
int sizeOfVertices = vertexCount * sizeof(int);
int block = 1024;
int grid = (vertexCount / 1024) + 1;
int* d_inDegree = NULL;
int* d_currentQueue = NULL;
int* d_nextQueue = NULL;
int* visitedCount;
int* nextQueueSize;
int currentQueueSize;
checkError(cudaEventCreate(&start));
checkError(cudaEventCreate(&stop));
cudaEventRecord(start);
checkError(cudaMalloc(&d_inDegree, sizeOfVertices));
checkError(cudaMalloc(&d_currentQueue, sizeOfVertices));
checkError(cudaMalloc(&d_nextQueue, sizeOfVertices));
checkError(cudaMallocHost((void**) &nextQueueSize, sizeof(int)));
checkError(cudaMallocHost((void**) &visitedCount, sizeof(int)));
computeInDegree<<<grid, block>>>(g.d_startIndices, g.d_endIndices, g.d_edges, d_inDegree, vertexCount);
cudaDeviceSynchronize();
loadQueue<<<grid, block>>>(d_inDegree, d_currentQueue, nextQueueSize, vertexCount);
cudaDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
*visitedCount = 0;
while (currentQueueSize > 0)
{
grid = (currentQueueSize / 1024) + 1;
clearQueue<<<grid, block>>>(g.d_startIndices, g.d_endIndices, g.d_edges,
d_inDegree, d_currentQueue, d_nextQueue,
visitedCount, nextQueueSize, currentQueueSize);
cudaDeviceSynchronize();
currentQueueSize = *nextQueueSize;
*nextQueueSize = 0;
std::swap(d_currentQueue, d_nextQueue);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
checkError(cudaFree(d_inDegree));
checkError(cudaFree(d_currentQueue));
checkError(cudaFree(d_nextQueue));
return time;
}
} |
b853b556a6ae79a6dfa67a1c6176005c16c5c5cc.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].
// The host launches this with a single block of `size` threads, so
// threadIdx.x alone indexes the arrays and no bounds check is required.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;  // one thread per element, block 0 only
    c[idx] = a[idx] + b[idx];
}
// Demo entry point: adds two constant 5-element vectors on the GPU via
// addWithCuda and prints the result. Returns 0 on success, 1 if any
// runtime call fails.
int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };
    // Add vectors in parallel.
    hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b and c, copies the inputs to the GPU,
// launches addKernel with a single block of `size` threads, copies the result
// back into c, and frees the device buffers. On any failure it jumps to the
// shared Error: cleanup label and returns the failing status; dev_* pointers
// start at 0, so hipFree on never-allocated buffers is harmless.
// NOTE(review): a single block is used, so `size` presumably must not exceed
// the device's max threads per block — confirm for larger inputs.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    hipError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
Error:
    // Single cleanup path for both success and failure.
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
    return cudaStatus;
}
| b853b556a6ae79a6dfa67a1c6176005c16c5c5cc.cu |
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].
// Launched as <<<1, size>>> by addWithCuda, so threadIdx.x alone indexes
// the arrays and no bounds check is needed (exactly one thread per element).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Demo entry point: adds two constant 5-element vectors on the GPU via
// addWithCuda and prints the result. Returns 0 on success, 1 on any failure.
int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    cudaError_t status = addWithCuda(c, a, b, arraySize);
    if (status != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
           c[0], c[1], c[2], c[3], c[4]);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    status = cudaDeviceReset();
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b and c, copies the inputs to the GPU,
// launches addKernel with a single block of `size` threads, copies the result
// back into c, and frees the device buffers. On any failure it jumps to the
// shared Error: cleanup label and returns the failing status; dev_* pointers
// start at 0, so cudaFree on never-allocated buffers is harmless.
// NOTE(review): a single block is used, so `size` presumably must not exceed
// the device's max threads per block — confirm for larger inputs.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output)    .
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Single cleanup path for both success and failure.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
2ceefd04fd534c661aa378db328aec4c67a7ebdd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**/#include "pointwise_scores.cuh"
#include "score_calcers.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <cmath>
#include <exception>
#include <cfloat>
namespace NKernel {
// Scores every binary split candidate with the "Solar" L2 criterion and writes,
// per block, the best (lowest-score) candidate into result[blockIdx.x].
// Candidates are scanned with a block-strided loop; folds are processed in
// (learn, test) pairs: even fold = estimation, odd fold = evaluation.
// Histogram layout: per candidate, weight at stride-2 offset 0 and sum at
// offset 1, separated per (leaf, fold) by binFeatureCount * histogramOffset.
// The tail tree-reduction assumes BLOCK_SIZE is a power of two; ties in score
// are broken toward the smaller candidate index.
template <int BLOCK_SIZE>
__global__ void FindOptimalSplitSolarImpl(const TCBinFeature* bf,
                                          int binFeatureCount,
                                          const float* binSums,
                                          const TPartitionStatistics* parts,
                                          int pCount, int foldCount,
                                          TBestSplitProperties* result)
{
    float bestScore = FLT_MAX;
    int bestIndex = 0;
    int tid = threadIdx.x;
    result += blockIdx.x;  // one output slot per block
    TPointwisePartOffsetsHelper helper(foldCount);
    for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) {
        if (i + tid >= binFeatureCount) {
            break;
        }
        if (bf[i + tid].SkipInScoreCount) {
            continue;
        }
        const float* current = binSums + 2 * (i + tid);
        float score = 0;
        for (int leaf = 0; leaf < pCount; leaf++) {
            float leftTotalWeight = 0;
            float rightTotalWeight = 0;
            float leftScore = 0;
            float rightScore = 0;
            #pragma unroll 4
            for (int fold = 0; fold < foldCount; fold += 2) {
                // Even fold estimates the leaf means, odd fold evaluates them.
                TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold));
                TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1));
                float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2];
                float weightEstimateRight = partLearn.Weight - weightEstimateLeft;
                float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1];
                float sumEstimateRight = partLearn.Sum - sumEstimateLeft;
                float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2];
                float weightTestRight = partTest.Weight - weightTestLeft;
                float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1];
                float sumTestRight = partTest.Sum - sumTestLeft;
                {
                    const float mu = weightEstimateLeft > 0.0f ? (sumEstimateLeft / (weightEstimateLeft + 1e-15f)) : 0;
                    leftScore += -2 * mu * sumTestLeft + weightTestLeft * mu * mu;
                    leftTotalWeight += weightTestLeft;
                }
                {
                    const float mu = weightEstimateRight > 0.0f ? (sumEstimateRight / (weightEstimateRight + 1e-15f)) : 0;
                    rightTotalWeight += weightTestRight;
                    rightScore += -2 * mu * sumTestRight + weightTestRight * mu * mu;
                }
            }
            // Children with test weight <= 2 contribute nothing; the log factor
            // penalizes heavier children.
            score += leftTotalWeight > 2 ? leftScore * (1 + 2 * log(leftTotalWeight + 1)) : 0;
            score += rightTotalWeight > 2 ? rightScore * (1 + 2 * log(rightTotalWeight + 1)) : 0;
        }
        if (score < bestScore) {
            bestScore = score;
            bestIndex = i + tid;
        }
    }
    // Block-wide argmin reduction over (score, index) in shared memory.
    __shared__ float scores[BLOCK_SIZE];
    scores[tid] = bestScore;
    __shared__ int indices[BLOCK_SIZE];
    indices[tid] = bestIndex;
    __syncthreads();
    for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if ( scores[tid] > scores[tid + s] ||
                 (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (!tid) {
        const int index = indices[0];
        result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0;
        result->BinId = index < binFeatureCount ? bf[index].BinId : 0;
        result->Score = scores[0];
    }
}
// Reads per-leaf (weight, sum) pairs from the "direct" histogram layout used by
// the single-fold split kernels: entries for a candidate feature are spaced by
// binFeatureCount * histogramOffset(leaf, 0) * 2 floats, weight first, sum second.
class TDirectHistLoader {
public:
    __forceinline__ __device__ TDirectHistLoader(const float* binSums,
                                                 TPointwisePartOffsetsHelper& helper,
                                                 int binFeatureId,
                                                 int /* leaf count*/,
                                                 int binFeatureCount)
        : BinSums(binSums + 2 * binFeatureId)  // base pointer already offset to this candidate
        , Helper(helper)
        , BinFeatureCount(binFeatureCount) {
    }
    // Accumulated sample weight for `leaf` (fold 0 only).
    __forceinline__ __device__ float LoadWeight(int leaf) {
        return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2];
    }
    // Accumulated target sum for `leaf` (fold 0 only).
    __forceinline__ __device__ float LoadSum(int leaf) {
        return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2 + 1];
    }
private:
    const float* BinSums;
    TPointwisePartOffsetsHelper& Helper;
    int BinFeatureCount;
};
// Reads per-leaf (weight, sum) pairs from histograms that were rearranged by
// GatherHistogramByLeaves: entries for one candidate feature are contiguous,
// 2 floats per leaf (weight then sum).
class TGatheredByLeavesHistLoader {
public:
    __forceinline__ __device__ TGatheredByLeavesHistLoader(const float* binSums,
                                                           TPointwisePartOffsetsHelper&,
                                                           int binFeatureId,
                                                           int leafCount,
                                                           int /*binFeatureCount*/)
        : BinSums(binSums)
        , LeafCount(leafCount)
        , FeatureId(binFeatureId) {
    }
    // Flat float offset of the (feature, leaf) entry pair.
    __forceinline__ __device__ int GetOffset(int leaf) {
        return 2 * (FeatureId * LeafCount + leaf);
    }
    __forceinline__ __device__ float LoadWeight(int leaf) {
        return BinSums[GetOffset(leaf)];
    }
    __forceinline__ __device__ float LoadSum(int leaf) {
        return BinSums[GetOffset(leaf) + 1];
    }
private:
    const float* BinSums;
    int LeafCount;
    int FeatureId;
};
// Single-fold variant of the split search: each candidate is scored by feeding
// per-leaf (sum, weight) statistics of its left/right children into the
// caller-supplied TScoreCalcer; histogram access is abstracted by THistLoader
// (direct vs. gathered-by-leaves layout). One block writes one best candidate
// to result[blockIdx.x]; ties go to the smaller index. BLOCK_SIZE must be a
// power of two for the tail reduction.
template <int BLOCK_SIZE,
          class THistLoader,
          class TScoreCalcer>
__global__ void FindOptimalSplitSingleFoldImpl(const TCBinFeature* bf,
                                               int binFeatureCount,
                                               const float* binSums,
                                               const TPartitionStatistics* parts,
                                               int pCount,
                                               TScoreCalcer calcer,
                                               TBestSplitProperties* result) {
    float bestScore = FLT_MAX;
    int bestIndex = 0;
    int tid = threadIdx.x;
    result += blockIdx.x;
    TPointwisePartOffsetsHelper helper(1);  // single fold
    for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) {
        if (i + tid >= binFeatureCount) {
            break;
        }
        if (bf[i + tid].SkipInScoreCount) {
            continue;
        }
        calcer.NextFeature(bf[i + tid]);  // reset calcer state per candidate
        THistLoader histLoader(binSums,
                               helper,
                               i + tid,
                               pCount,
                               binFeatureCount);
        for (int leaf = 0; leaf < pCount; leaf++) {
            TPartitionStatistics part = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, 0));
            float weightLeft = histLoader.LoadWeight(leaf);
            // Right child = whole partition minus the left histogram; clamp
            // against small negative float error.
            float weightRight = max(part.Weight - weightLeft, 0.0f);
            float sumLeft = histLoader.LoadSum(leaf);
            float sumRight = static_cast<float>(part.Sum - sumLeft);
            calcer.AddLeaf(sumLeft, weightLeft);
            calcer.AddLeaf(sumRight, weightRight);
        }
        const float score = calcer.GetScore();
        if (score < bestScore) {
            bestScore = score;
            bestIndex = i + tid;
        }
    }
    // Block-wide argmin reduction over (score, index).
    __shared__ float scores[BLOCK_SIZE];
    scores[tid] = bestScore;
    __shared__ int indices[BLOCK_SIZE];
    indices[tid] = bestIndex;
    __syncthreads();
    for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if ( scores[tid] > scores[tid + s] ||
                 (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (!tid) {
        const int index = indices[0];
        result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0;
        result->BinId = index < binFeatureCount ? bf[index].BinId : 0;
        result->Score = scores[0];
    }
}
// Cosine-similarity split search over (learn, test) fold pairs: leaf means are
// estimated on even folds (with L2 regularization lambda, optionally scaled by
// the leaf weight) and correlated against odd-fold statistics. The final score
// is the negative normalized correlation (lower is better); optional Gaussian
// noise (scoreStdDev, seeded per feature) randomizes selection. One best
// candidate per block is written to result[blockIdx.x]; BLOCK_SIZE must be a
// power of two for the tail reduction.
template <int BLOCK_SIZE>
__global__ void FindOptimalSplitCosineImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums,
                                           const TPartitionStatistics* parts, int pCount, int foldCount,
                                           double l2, bool normalize,
                                           double scoreStdDev, ui64 globalSeed,
                                           TBestSplitProperties* result)
{
    float bestScore = FLT_MAX;
    int bestIndex = 0;
    int tid = threadIdx.x;
    result += blockIdx.x;
    TPointwisePartOffsetsHelper helper(foldCount);
    for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) {
        if (i + tid >= binFeatureCount) {
            break;
        }
        if (bf[i + tid].SkipInScoreCount) {
            continue;
        }
        float score = 0;
        float denumSqr = 1e-20f;  // guards the sqrt below against zero
        const float* current = binSums + 2 * (i + tid);
        for (int leaf = 0; leaf < pCount; leaf++) {
            #pragma unroll 4
            for (int fold = 0; fold < foldCount; fold += 2) {
                // Even fold estimates, odd fold evaluates.
                TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold));
                TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1));
                float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2];
                float weightEstimateRight = max(partLearn.Weight - weightEstimateLeft, 0.0f);
                float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1];
                float sumEstimateRight = partLearn.Sum - sumEstimateLeft;
                float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2];
                float weightTestRight = max(partTest.Weight - weightTestLeft, 0.0f);
                float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1];
                float sumTestRight = partTest.Sum - sumTestLeft;
                {
                    double lambda = normalize ? l2 * weightEstimateLeft : l2;
                    const float mu = weightEstimateLeft > 0 ? (sumEstimateLeft / (weightEstimateLeft + lambda)) : 0;
                    score += sumTestLeft * mu;
                    denumSqr += weightTestLeft * mu * mu;
                }
                {
                    double lambda = normalize ? l2 * weightEstimateRight : l2;
                    const float mu = weightEstimateRight > 0 ? (sumEstimateRight / (weightEstimateRight + lambda)) : 0;
                    score += sumTestRight * mu;
                    denumSqr += weightTestRight * mu * mu;
                }
            }
        }
        // Negate so that lower = better, like the other score kernels.
        score = denumSqr > 1e-15f ? -score / sqrt(denumSqr) : FLT_MAX;
        float tmp = score;
        if (scoreStdDev) {
            // Deterministic per-feature noise for randomized split selection.
            ui64 seed = globalSeed + bf[i + tid].FeatureId;
            AdvanceSeed(&seed, 4);
            tmp += NextNormal(&seed) * scoreStdDev;
        }
        if (tmp < bestScore) {
            bestScore = tmp;
            bestIndex = i + tid;
        }
    }
    // Block-wide argmin reduction over (score, index).
    __shared__ float scores[BLOCK_SIZE];
    scores[tid] = bestScore;
    __shared__ int indices[BLOCK_SIZE];
    indices[tid] = bestIndex;
    __syncthreads();
    for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if (scores[tid] > scores[tid + s] ||
                (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (!tid) {
        const int index = indices[0];
        result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0;
        result->BinId = index < binFeatureCount ? bf[index].BinId : 0;
        result->Score = scores[0];
    }
}
// Multi-fold (dynamic-boosting) dispatch: launches the fold-paired split-search
// kernel matching scoreFunction on `stream`, one block per result slot.
// Only SolarL2 and Cosine/NewtonCosine are supported here; anything else throws.
void FindOptimalSplitDynamic(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount,
                             const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount,
                             TBestSplitProperties* result, ui32 resultSize,
                             EScoreFunction scoreFunction, double l2, bool normalize,
                             double scoreStdDev, ui64 seed,
                             TCudaStream stream) {
    const int blockSize = 128;
    switch (scoreFunction)
    {
        case EScoreFunction::SolarL2: {
            FindOptimalSplitSolarImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result);
            break;
        }
        case EScoreFunction::Cosine:
        case EScoreFunction::NewtonCosine: {
            FindOptimalSplitCosineImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, l2, normalize, scoreStdDev, seed, result);
            break;
        }
        default: {
            throw std::exception();
        }
    }
}
// Single-fold (plain-boosting) dispatch: instantiates the score calcer matching
// scoreFunction and launches FindOptimalSplitSingleFoldImpl with the given
// histogram loader TLoader. The RUN() macro factors out the identical launch.
// Unsupported score functions throw.
template <class TLoader>
void FindOptimalSplitPlain(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount,
                           const float* splits, const TPartitionStatistics* parts, ui32 pCount,
                           TBestSplitProperties* result, ui32 resultSize,
                           EScoreFunction scoreFunction, double l2, bool normalize,
                           double scoreStdDev, ui64 seed,
                           TCudaStream stream) {
    const int blockSize = 128;
    #define RUN() \
    FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TScoreCalcer> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result);
    switch (scoreFunction)
    {
        case EScoreFunction::SolarL2: {
            using TScoreCalcer = TSolarScoreCalcer;
            TScoreCalcer scoreCalcer(static_cast<float>(l2));
            RUN()
            break;
        }
        case EScoreFunction::SatL2: {
            using TScoreCalcer = TSatL2ScoreCalcer;
            TScoreCalcer scoreCalcer(static_cast<float>(l2));
            RUN()
            break;
        }
        case EScoreFunction::LOOL2: {
            using TScoreCalcer = TLOOL2ScoreCalcer;
            TScoreCalcer scoreCalcer(static_cast<float>(l2));
            RUN()
            break;
        }
        case EScoreFunction::L2:
        case EScoreFunction::NewtonL2: {
            using TScoreCalcer = TL2ScoreCalcer;
            TScoreCalcer scoreCalcer(static_cast<float>(l2));
            RUN()
            break;
        }
        case EScoreFunction::Cosine:
        case EScoreFunction::NewtonCosine: {
            // Cosine additionally carries normalization and noise parameters.
            using TScoreCalcer = TCosineScoreCalcer;
            TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
                                           normalize,
                                           static_cast<float>(scoreStdDev),
                                           seed);
            RUN()
            break;
        }
        default: {
            throw std::exception();
        }
    }
    #undef RUN
}
// Public entry point for the best-split search. Routes to the single-fold
// (plain) path — choosing the histogram loader by memory layout — when
// foldCount == 1, otherwise to the multi-fold dynamic path.
void FindOptimalSplit(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount,
                      const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount,
                      TBestSplitProperties* result, ui32 resultSize,
                      EScoreFunction scoreFunction, double l2, bool normalize,
                      double scoreStdDev, ui64 seed, bool gatheredByLeaves,
                      TCudaStream stream)
{
    if (foldCount == 1) {
        if (gatheredByLeaves) {
            // Histograms already rearranged by GatherHistogramByLeaves.
            using THistLoader = TGatheredByLeavesHistLoader;
            FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream);
        } else {
            using THistLoader = TDirectHistLoader;
            FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream);
        }
    } else {
        FindOptimalSplitDynamic(binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream);
    }
}
// Transposes histograms from the partition-offset layout into a dense
// [feature][leaf][fold][hist] layout. Each thread handles one (feature, leaf)
// pair for fold blockIdx.y and copies HIST_COUNT consecutive stats.
// NOTE(review): leafId is computed with a bitmask, so this assumes leafCount
// is a power of two (and divides BLOCK_SIZE) — confirm at the call sites.
template <int BLOCK_SIZE, int HIST_COUNT>
__global__ void GatherHistogramsByLeavesImpl(const int binFeatureCount,
                                             const float* histogram,
                                             const int histCount,
                                             const int leafCount,
                                             const int foldCount,
                                             float* result) {
    const int featuresPerBlock = BLOCK_SIZE / leafCount;
    const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / leafCount;
    const int leafId = threadIdx.x & (leafCount - 1);
    const int foldId = blockIdx.y;
    TPointwisePartOffsetsHelper helper(gridDim.y);
    if (featureId < binFeatureCount) {
        float leafVals[HIST_COUNT];
        // Read all stats first, then write, to keep the accesses batched.
        #pragma unroll
        for (int histId = 0; histId < HIST_COUNT; ++histId) {
            leafVals[histId] = LdgWithFallback(histogram,
                                               (featureId + (size_t)binFeatureCount * helper.GetHistogramOffset(leafId, foldId)) * HIST_COUNT + histId);
        }
        #pragma unroll
        for (int histId = 0; histId < HIST_COUNT; ++histId) {
            const ui64 idx = ((size_t)featureId * leafCount * foldCount + leafId * foldCount + foldId) * HIST_COUNT + histId;
            result[idx] = leafVals[histId];
        }
    }
}
// Host launcher for GatherHistogramsByLeavesImpl: one grid dimension over
// features (packing blockSize/leafCount features per block), one over folds.
// Returns false when histCount is not one of the supported template
// instantiations (1, 2 or 4); no kernel is launched in that case.
bool GatherHistogramByLeaves(const float* histogram,
                             const ui32 binFeatureCount,
                             const ui32 histCount,
                             const ui32 leafCount,
                             const ui32 foldCount,
                             float* result,
                             TCudaStream stream
)
{
    const int blockSize = 1024;
    dim3 numBlocks;
    // Ceil-divide features by the number of features each block covers.
    numBlocks.x = (binFeatureCount + (blockSize / leafCount) - 1) / (blockSize / leafCount);
    numBlocks.y = foldCount;
    numBlocks.z = 1;
    switch (histCount) {
        case 1: {
            hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 1>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result);
            return true;
        }
        case 2: {
            hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 2>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result);
            return true;
        }
        case 4: {
            hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 4>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result);
            return true;
        }
        default: {
            return false;
        }
    }
}
// Recomputes per-partition aggregate statistics: one block per partition,
// block-reducing the target sum, weight sum and count over the partition's
// [Offset, Offset + Size) range. Any of target/weights/counts may be null;
// a null weights/target yields 0, a null counts falls back to the raw size.
template <int BLOCK_SIZE>
__global__ void PartitionUpdateImpl(const float* target,
                                    const float* weights,
                                    const float* counts,
                                    const struct TDataPartition* parts,
                                    struct TPartitionStatistics* partStats)
{
    const int tid = threadIdx.x;
    parts += blockIdx.x;      // this block's partition
    partStats += blockIdx.x;  // and its output slot
    const int size = parts->Size;
    __shared__ volatile double localBuffer[BLOCK_SIZE];
    double tmp = 0;
    if (weights != 0) {
        localBuffer[tid] = ComputeSum<BLOCK_SIZE>(weights + parts->Offset, size);
        __syncthreads();
        tmp = Reduce<double, BLOCK_SIZE>(localBuffer);
    }
    if (tid == 0)
    {
        partStats->Weight = tmp;
    }
    tmp = 0;
    // Barrier before reusing localBuffer for the next reduction.
    __syncthreads();
    if (target != 0) {
        localBuffer[tid] = ComputeSum<BLOCK_SIZE>(target + parts->Offset, size);
        __syncthreads();
        tmp = Reduce<double, BLOCK_SIZE>(localBuffer);
    }
    if (tid == 0)
    {
        partStats->Sum = tmp;
    }
    tmp = 0;
    __syncthreads();
    if (counts != 0) {
        localBuffer[tid] = ComputeSum<BLOCK_SIZE>(counts + parts->Offset, size);
        __syncthreads();
        tmp = Reduce<double, BLOCK_SIZE>(localBuffer);
    } else {
        tmp = size;  // no explicit counts: every sample counts once
    }
    if (tid == 0) {
        partStats->Count = tmp;
    }
}
// Host launcher for PartitionUpdateImpl: one 1024-thread block per partition
// on `stream`. No-op when partsCount is 0 (a zero-sized grid is invalid).
void UpdatePartitionProps(const float* target,
                          const float* weights,
                          const float* counts,
                          const struct TDataPartition* parts,
                          struct TPartitionStatistics* partStats,
                          int partsCount,
                          TCudaStream stream
)
{
    const int blockSize = 1024;
    if (partsCount) {
        PartitionUpdateImpl<blockSize> << < partsCount, blockSize, 0, stream >> > (target, weights, counts, parts, partStats);
    }
}
}
| 2ceefd04fd534c661aa378db328aec4c67a7ebdd.cu | /**/#include "pointwise_scores.cuh"
#include "score_calcers.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <cmath>
#include <exception>
#include <cfloat>
namespace NKernel {
// Scores every binary split candidate with the "Solar" L2 criterion and writes,
// per block, the best (lowest-score) candidate into result[blockIdx.x].
// Folds are processed in (learn, test) pairs: even fold = estimation, odd fold
// = evaluation. The tail tree-reduction assumes BLOCK_SIZE is a power of two;
// ties in score are broken toward the smaller candidate index.
template <int BLOCK_SIZE>
__global__ void FindOptimalSplitSolarImpl(const TCBinFeature* bf,
                                          int binFeatureCount,
                                          const float* binSums,
                                          const TPartitionStatistics* parts,
                                          int pCount, int foldCount,
                                          TBestSplitProperties* result)
{
    float bestScore = FLT_MAX;
    int bestIndex = 0;
    int tid = threadIdx.x;
    result += blockIdx.x;  // one output slot per block
    TPointwisePartOffsetsHelper helper(foldCount);
    for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) {
        if (i + tid >= binFeatureCount) {
            break;
        }
        if (bf[i + tid].SkipInScoreCount) {
            continue;
        }
        const float* current = binSums + 2 * (i + tid);
        float score = 0;
        for (int leaf = 0; leaf < pCount; leaf++) {
            float leftTotalWeight = 0;
            float rightTotalWeight = 0;
            float leftScore = 0;
            float rightScore = 0;
            #pragma unroll 4
            for (int fold = 0; fold < foldCount; fold += 2) {
                // Even fold estimates the leaf means, odd fold evaluates them.
                TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold));
                TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1));
                float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2];
                float weightEstimateRight = partLearn.Weight - weightEstimateLeft;
                float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1];
                float sumEstimateRight = partLearn.Sum - sumEstimateLeft;
                float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2];
                float weightTestRight = partTest.Weight - weightTestLeft;
                float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1];
                float sumTestRight = partTest.Sum - sumTestLeft;
                {
                    const float mu = weightEstimateLeft > 0.0f ? (sumEstimateLeft / (weightEstimateLeft + 1e-15f)) : 0;
                    leftScore += -2 * mu * sumTestLeft + weightTestLeft * mu * mu;
                    leftTotalWeight += weightTestLeft;
                }
                {
                    const float mu = weightEstimateRight > 0.0f ? (sumEstimateRight / (weightEstimateRight + 1e-15f)) : 0;
                    rightTotalWeight += weightTestRight;
                    rightScore += -2 * mu * sumTestRight + weightTestRight * mu * mu;
                }
            }
            // Children with test weight <= 2 contribute nothing; the log factor
            // penalizes heavier children.
            score += leftTotalWeight > 2 ? leftScore * (1 + 2 * log(leftTotalWeight + 1)) : 0;
            score += rightTotalWeight > 2 ? rightScore * (1 + 2 * log(rightTotalWeight + 1)) : 0;
        }
        if (score < bestScore) {
            bestScore = score;
            bestIndex = i + tid;
        }
    }
    // Block-wide argmin reduction over (score, index) in shared memory.
    __shared__ float scores[BLOCK_SIZE];
    scores[tid] = bestScore;
    __shared__ int indices[BLOCK_SIZE];
    indices[tid] = bestIndex;
    __syncthreads();
    for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if ( scores[tid] > scores[tid + s] ||
                 (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (!tid) {
        const int index = indices[0];
        result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0;
        result->BinId = index < binFeatureCount ? bf[index].BinId : 0;
        result->Score = scores[0];
    }
}
// Reads per-leaf (weight, sum) pairs from the "direct" histogram layout used by
// the single-fold split kernels: entries for a candidate feature are spaced by
// binFeatureCount * histogramOffset(leaf, 0) * 2 floats, weight first, sum second.
class TDirectHistLoader {
public:
    __forceinline__ __device__ TDirectHistLoader(const float* binSums,
                                                 TPointwisePartOffsetsHelper& helper,
                                                 int binFeatureId,
                                                 int /* leaf count*/,
                                                 int binFeatureCount)
        : BinSums(binSums + 2 * binFeatureId)  // base pointer already offset to this candidate
        , Helper(helper)
        , BinFeatureCount(binFeatureCount) {
    }
    // Accumulated sample weight for `leaf` (fold 0 only).
    __forceinline__ __device__ float LoadWeight(int leaf) {
        return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2];
    }
    // Accumulated target sum for `leaf` (fold 0 only).
    __forceinline__ __device__ float LoadSum(int leaf) {
        return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2 + 1];
    }
private:
    const float* BinSums;
    TPointwisePartOffsetsHelper& Helper;
    int BinFeatureCount;
};
// Reads per-leaf (weight, sum) pairs from histograms that were rearranged into
// a dense by-leaves layout: entries for one candidate feature are contiguous,
// 2 floats per leaf (weight then sum).
class TGatheredByLeavesHistLoader {
public:
    __forceinline__ __device__ TGatheredByLeavesHistLoader(const float* binSums,
                                                           TPointwisePartOffsetsHelper&,
                                                           int binFeatureId,
                                                           int leafCount,
                                                           int /*binFeatureCount*/)
        : BinSums(binSums)
        , LeafCount(leafCount)
        , FeatureId(binFeatureId) {
    }
    // Flat float offset of the (feature, leaf) entry pair.
    __forceinline__ __device__ int GetOffset(int leaf) {
        return 2 * (FeatureId * LeafCount + leaf);
    }
    __forceinline__ __device__ float LoadWeight(int leaf) {
        return BinSums[GetOffset(leaf)];
    }
    __forceinline__ __device__ float LoadSum(int leaf) {
        return BinSums[GetOffset(leaf) + 1];
    }
private:
    const float* BinSums;
    int LeafCount;
    int FeatureId;
};
// Single-fold variant of the split search: each candidate is scored by feeding
// per-leaf (sum, weight) statistics of its left/right children into the
// caller-supplied TScoreCalcer; histogram access is abstracted by THistLoader.
// One block writes one best candidate to result[blockIdx.x]; ties go to the
// smaller index. BLOCK_SIZE must be a power of two for the tail reduction.
template <int BLOCK_SIZE,
          class THistLoader,
          class TScoreCalcer>
__global__ void FindOptimalSplitSingleFoldImpl(const TCBinFeature* bf,
                                               int binFeatureCount,
                                               const float* binSums,
                                               const TPartitionStatistics* parts,
                                               int pCount,
                                               TScoreCalcer calcer,
                                               TBestSplitProperties* result) {
    float bestScore = FLT_MAX;
    int bestIndex = 0;
    int tid = threadIdx.x;
    result += blockIdx.x;
    TPointwisePartOffsetsHelper helper(1);  // single fold
    for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) {
        if (i + tid >= binFeatureCount) {
            break;
        }
        if (bf[i + tid].SkipInScoreCount) {
            continue;
        }
        calcer.NextFeature(bf[i + tid]);  // reset calcer state per candidate
        THistLoader histLoader(binSums,
                               helper,
                               i + tid,
                               pCount,
                               binFeatureCount);
        for (int leaf = 0; leaf < pCount; leaf++) {
            TPartitionStatistics part = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, 0));
            float weightLeft = histLoader.LoadWeight(leaf);
            // Right child = whole partition minus the left histogram; clamp
            // against small negative float error.
            float weightRight = max(part.Weight - weightLeft, 0.0f);
            float sumLeft = histLoader.LoadSum(leaf);
            float sumRight = static_cast<float>(part.Sum - sumLeft);
            calcer.AddLeaf(sumLeft, weightLeft);
            calcer.AddLeaf(sumRight, weightRight);
        }
        const float score = calcer.GetScore();
        if (score < bestScore) {
            bestScore = score;
            bestIndex = i + tid;
        }
    }
    // Block-wide argmin reduction over (score, index).
    __shared__ float scores[BLOCK_SIZE];
    scores[tid] = bestScore;
    __shared__ int indices[BLOCK_SIZE];
    indices[tid] = bestIndex;
    __syncthreads();
    for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if ( scores[tid] > scores[tid + s] ||
                 (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (!tid) {
        const int index = indices[0];
        result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0;
        result->BinId = index < binFeatureCount ? bf[index].BinId : 0;
        result->Score = scores[0];
    }
}
// Cosine-similarity split search over (learn, test) fold pairs: leaf means are
// estimated on even folds (with L2 regularization lambda, optionally scaled by
// the leaf weight) and correlated against odd-fold statistics. The final score
// is the negative normalized correlation (lower is better); optional Gaussian
// noise (scoreStdDev, seeded per feature) randomizes selection. One best
// candidate per block is written to result[blockIdx.x]; BLOCK_SIZE must be a
// power of two for the tail reduction.
template <int BLOCK_SIZE>
__global__ void FindOptimalSplitCosineImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums,
                                           const TPartitionStatistics* parts, int pCount, int foldCount,
                                           double l2, bool normalize,
                                           double scoreStdDev, ui64 globalSeed,
                                           TBestSplitProperties* result)
{
    float bestScore = FLT_MAX;
    int bestIndex = 0;
    int tid = threadIdx.x;
    result += blockIdx.x;
    TPointwisePartOffsetsHelper helper(foldCount);
    for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) {
        if (i + tid >= binFeatureCount) {
            break;
        }
        if (bf[i + tid].SkipInScoreCount) {
            continue;
        }
        float score = 0;
        float denumSqr = 1e-20f;  // guards the sqrt below against zero
        const float* current = binSums + 2 * (i + tid);
        for (int leaf = 0; leaf < pCount; leaf++) {
            #pragma unroll 4
            for (int fold = 0; fold < foldCount; fold += 2) {
                // Even fold estimates, odd fold evaluates.
                TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold));
                TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1));
                float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2];
                float weightEstimateRight = max(partLearn.Weight - weightEstimateLeft, 0.0f);
                float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1];
                float sumEstimateRight = partLearn.Sum - sumEstimateLeft;
                float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2];
                float weightTestRight = max(partTest.Weight - weightTestLeft, 0.0f);
                float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1];
                float sumTestRight = partTest.Sum - sumTestLeft;
                {
                    double lambda = normalize ? l2 * weightEstimateLeft : l2;
                    const float mu = weightEstimateLeft > 0 ? (sumEstimateLeft / (weightEstimateLeft + lambda)) : 0;
                    score += sumTestLeft * mu;
                    denumSqr += weightTestLeft * mu * mu;
                }
                {
                    double lambda = normalize ? l2 * weightEstimateRight : l2;
                    const float mu = weightEstimateRight > 0 ? (sumEstimateRight / (weightEstimateRight + lambda)) : 0;
                    score += sumTestRight * mu;
                    denumSqr += weightTestRight * mu * mu;
                }
            }
        }
        // Negate so that lower = better, like the other score kernels.
        score = denumSqr > 1e-15f ? -score / sqrt(denumSqr) : FLT_MAX;
        float tmp = score;
        if (scoreStdDev) {
            // Deterministic per-feature noise for randomized split selection.
            ui64 seed = globalSeed + bf[i + tid].FeatureId;
            AdvanceSeed(&seed, 4);
            tmp += NextNormal(&seed) * scoreStdDev;
        }
        if (tmp < bestScore) {
            bestScore = tmp;
            bestIndex = i + tid;
        }
    }
    // Block-wide argmin reduction over (score, index).
    __shared__ float scores[BLOCK_SIZE];
    scores[tid] = bestScore;
    __shared__ int indices[BLOCK_SIZE];
    indices[tid] = bestIndex;
    __syncthreads();
    for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
        if (tid < s) {
            if (scores[tid] > scores[tid + s] ||
                (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) {
                scores[tid] = scores[tid + s];
                indices[tid] = indices[tid + s];
            }
        }
        __syncthreads();
    }
    if (!tid) {
        const int index = indices[0];
        result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0;
        result->BinId = index < binFeatureCount ? bf[index].BinId : 0;
        result->Score = scores[0];
    }
}
// Dispatches the multi-fold ("dynamic") best-split search to the kernel that
// matches the requested score function. One CUDA block is launched per result
// slot. Throws std::exception for score functions without a dynamic kernel.
void FindOptimalSplitDynamic(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
                             const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount,
                             TBestSplitProperties* result, ui32 resultSize,
                             EScoreFunction scoreFunction, double l2, bool normalize,
                             double scoreStdDev, ui64 seed,
                             TCudaStream stream) {
    const int blockSize = 128;
    if (scoreFunction == EScoreFunction::SolarL2) {
        FindOptimalSplitSolarImpl<blockSize><<<resultSize, blockSize, 0, stream>>>(
            binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result);
    } else if (scoreFunction == EScoreFunction::Cosine || scoreFunction == EScoreFunction::NewtonCosine) {
        // Cosine scoring additionally uses l2 regularization, optional
        // normalization and randomized score noise seeded by `seed`.
        FindOptimalSplitCosineImpl<blockSize><<<resultSize, blockSize, 0, stream>>>(
            binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount,
            l2, normalize, scoreStdDev, seed, result);
    } else {
        throw std::exception();
    }
}
// Single-fold ("plain") best-split search. Constructs the score calcer that
// matches scoreFunction, instantiates the single-fold kernel for the histogram
// loader TLoader, and launches one block per result slot.
// Throws std::exception for score functions with no plain implementation.
template <class TLoader>
void FindOptimalSplitPlain(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
                           const float* splits, const TPartitionStatistics* parts, ui32 pCount,
                           TBestSplitProperties* result, ui32 resultSize,
                           EScoreFunction scoreFunction, double l2, bool normalize,
                           double scoreStdDev, ui64 seed,
                           TCudaStream stream) {
    const int blockSize = 128;
    switch (scoreFunction) {
        case EScoreFunction::SolarL2: {
            TSolarScoreCalcer scoreCalcer(static_cast<float>(l2));
            FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TSolarScoreCalcer><<<resultSize, blockSize, 0, stream>>>(
                binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result);
            break;
        }
        case EScoreFunction::SatL2: {
            TSatL2ScoreCalcer scoreCalcer(static_cast<float>(l2));
            FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TSatL2ScoreCalcer><<<resultSize, blockSize, 0, stream>>>(
                binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result);
            break;
        }
        case EScoreFunction::LOOL2: {
            TLOOL2ScoreCalcer scoreCalcer(static_cast<float>(l2));
            FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TLOOL2ScoreCalcer><<<resultSize, blockSize, 0, stream>>>(
                binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result);
            break;
        }
        case EScoreFunction::L2:
        case EScoreFunction::NewtonL2: {
            TL2ScoreCalcer scoreCalcer(static_cast<float>(l2));
            FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TL2ScoreCalcer><<<resultSize, blockSize, 0, stream>>>(
                binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result);
            break;
        }
        case EScoreFunction::Cosine:
        case EScoreFunction::NewtonCosine: {
            // Cosine additionally applies normalization and randomized noise.
            TCosineScoreCalcer scoreCalcer(static_cast<float>(l2),
                                           normalize,
                                           static_cast<float>(scoreStdDev),
                                           seed);
            FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TCosineScoreCalcer><<<resultSize, blockSize, 0, stream>>>(
                binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result);
            break;
        }
        default: {
            throw std::exception();
        }
    }
}
// Entry point for the best-split search. Single-fold data uses the plain
// kernel, specialized at compile time on the histogram memory layout
// (gathered-by-leaves vs. direct); multi-fold data uses the dynamic path.
void FindOptimalSplit(const TCBinFeature* binaryFeatures, ui32 binaryFeatureCount,
                      const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount,
                      TBestSplitProperties* result, ui32 resultSize,
                      EScoreFunction scoreFunction, double l2, bool normalize,
                      double scoreStdDev, ui64 seed, bool gatheredByLeaves,
                      TCudaStream stream)
{
    if (foldCount != 1) {
        FindOptimalSplitDynamic(binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream);
        return;
    }
    if (gatheredByLeaves) {
        FindOptimalSplitPlain<TGatheredByLeavesHistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream);
    } else {
        FindOptimalSplitPlain<TDirectHistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream);
    }
}
// Reorders histogram values into a [feature][leaf][fold][hist] layout in
// `result`. Each thread handles one (feature, leaf) pair for the fold given
// by blockIdx.y; a block covers BLOCK_SIZE / leafCount features.
// NOTE(review): leafId is extracted with a bit-mask, which is only correct if
// leafCount is a power of two — confirm callers guarantee this.
// NOTE(review): the runtime `histCount` parameter is not read here; the
// HIST_COUNT template argument is used instead.
template <int BLOCK_SIZE, int HIST_COUNT>
__global__ void GatherHistogramsByLeavesImpl(const int binFeatureCount,
                                             const float* histogram,
                                             const int histCount,
                                             const int leafCount,
                                             const int foldCount,
                                             float* result) {
    const int featuresPerBlock = BLOCK_SIZE / leafCount;
    const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / leafCount;
    const int leafId = threadIdx.x & (leafCount - 1);  // assumes leafCount is a power of two
    const int foldId = blockIdx.y;
    TPointwisePartOffsetsHelper helper(gridDim.y);
    if (featureId < binFeatureCount) {
        // Stage the HIST_COUNT source values in registers first.
        float leafVals[HIST_COUNT];
        #pragma unroll
        for (int histId = 0; histId < HIST_COUNT; ++histId) {
            leafVals[histId] = LdgWithFallback(histogram,
                                               (featureId + (size_t)binFeatureCount * helper.GetHistogramOffset(leafId, foldId)) * HIST_COUNT + histId);
        }
        // Write them out in the gathered-by-leaves order.
        #pragma unroll
        for (int histId = 0; histId < HIST_COUNT; ++histId) {
            const ui64 idx = ((size_t)featureId * leafCount * foldCount + leafId * foldCount + foldId) * HIST_COUNT + histId;
            result[idx] = leafVals[histId];
        }
    }
}
// Launches GatherHistogramsByLeavesImpl for the supported histogram widths
// (1, 2 or 4 values per bin). Grid: x covers features, y covers folds.
// Returns false when the configuration is unsupported (unknown histCount, or
// a leafCount this launch scheme cannot handle).
bool GatherHistogramByLeaves(const float* histogram,
                             const ui32 binFeatureCount,
                             const ui32 histCount,
                             const ui32 leafCount,
                             const ui32 foldCount,
                             float* result,
                             TCudaStream stream
)
{
    const int blockSize = 1024;
    // Guard: featuresPerBlock = blockSize / leafCount must be >= 1, otherwise
    // the grid-size computation below divides by zero (leafCount == 0 or
    // leafCount > blockSize).
    if (leafCount == 0 || leafCount > static_cast<ui32>(blockSize)) {
        return false;
    }
    dim3 numBlocks;
    numBlocks.x = (binFeatureCount + (blockSize / leafCount) - 1) / (blockSize / leafCount);
    numBlocks.y = foldCount;
    numBlocks.z = 1;
    switch (histCount) {
        case 1: {
            GatherHistogramsByLeavesImpl<blockSize, 1> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result);
            return true;
        }
        case 2: {
            GatherHistogramsByLeavesImpl<blockSize, 2> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result);
            return true;
        }
        case 4: {
            GatherHistogramsByLeavesImpl<blockSize, 4> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result);
            return true;
        }
        default: {
            return false;
        }
    }
}
// One block per partition: computes Weight (sum of weights), Sum (sum of
// targets) and Count for partStats[blockIdx.x]. A null weights/target pointer
// yields 0 for the corresponding statistic; a null counts pointer falls back
// to the partition size.
template <int BLOCK_SIZE>
__global__ void PartitionUpdateImpl(const float* target,
                                    const float* weights,
                                    const float* counts,
                                    const struct TDataPartition* parts,
                                    struct TPartitionStatistics* partStats)
{
    const int tid = threadIdx.x;
    parts += blockIdx.x;       // this block's partition descriptor
    partStats += blockIdx.x;   // this block's output slot
    const int size = parts->Size;
    __shared__ volatile double localBuffer[BLOCK_SIZE];
    double tmp = 0;
    // --- weight statistic ---
    if (weights != 0) {
        localBuffer[tid] = ComputeSum<BLOCK_SIZE>(weights + parts->Offset, size);
        __syncthreads();
        tmp = Reduce<double, BLOCK_SIZE>(localBuffer);
    }
    if (tid == 0)
    {
        partStats->Weight = tmp;
    }
    tmp = 0;
    __syncthreads();  // localBuffer is reused below; wait until the reduce finished
    // --- target sum ---
    if (target != 0) {
        localBuffer[tid] = ComputeSum<BLOCK_SIZE>(target + parts->Offset, size);
        __syncthreads();
        tmp = Reduce<double, BLOCK_SIZE>(localBuffer);
    }
    if (tid == 0)
    {
        partStats->Sum = tmp;
    }
    tmp = 0;
    __syncthreads();
    // --- count ---
    if (counts != 0) {
        localBuffer[tid] = ComputeSum<BLOCK_SIZE>(counts + parts->Offset, size);
        __syncthreads();
        tmp = Reduce<double, BLOCK_SIZE>(localBuffer);
    } else {
        tmp = size;  // no explicit counts: use the partition size
    }
    if (tid == 0) {
        partStats->Count = tmp;
    }
}
// Host wrapper: computes per-partition sum/weight/count statistics with one
// 1024-thread block per partition. A zero partsCount is a no-op.
void UpdatePartitionProps(const float* target,
                          const float* weights,
                          const float* counts,
                          const struct TDataPartition* parts,
                          struct TPartitionStatistics* partStats,
                          int partsCount,
                          TCudaStream stream
)
{
    const int blockSize = 1024;
    if (partsCount == 0) {
        return;
    }
    PartitionUpdateImpl<blockSize><<<partsCount, blockSize, 0, stream>>>(target, weights, counts, parts, partStats);
}
}
|
885d6dcd0f906137150cffa6f538c39c29e142df.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "../include/cudaUtility.h"
// Element-wise integer vector addition: c[i] = a[i] + b[i] for i < 10.
// Launched as <<<10, 1>>>, so blockIdx.x selects the element.
__global__ void add(int *a, int *b, int *c){
    int idx = blockIdx.x;
    if (idx >= 10) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
// Adds two 10-element integer vectors on the GPU (a[i] = -i, b[i] = i*i) and
// prints "a + b = c" for each element.
int main()
{
    const int N = 10;
    const size_t bytes = N * sizeof(int);
    int a[N], b[N], c[N];
    // Fill host inputs.
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    int *dev_a, *dev_b, *dev_c;
    CUDA_FAILED(hipMalloc((void **)&dev_a, bytes));
    CUDA_FAILED(hipMalloc((void **)&dev_b, bytes));
    CUDA_FAILED(hipMalloc((void **)&dev_c, bytes));
    CUDA_FAILED(hipMemcpy(dev_a, a, bytes, hipMemcpyHostToDevice));
    CUDA_FAILED(hipMemcpy(dev_b, b, bytes, hipMemcpyHostToDevice));
    // One single-thread block per element.
    hipLaunchKernelGGL(add, dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
    CUDA_FAILED(hipMemcpy(c, dev_c, bytes, hipMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
}
| 885d6dcd0f906137150cffa6f538c39c29e142df.cu | #include <iostream>
#include <cuda_runtime.h>
#include "../include/cudaUtility.h"
// Element-wise integer vector addition: c[i] = a[i] + b[i] for i < 10.
// Launched as <<<10, 1>>>, so blockIdx.x selects the element.
__global__ void add(int *a, int *b, int *c){
    int idx = blockIdx.x;
    if (idx >= 10) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
// Adds two 10-element integer vectors on the GPU (a[i] = -i, b[i] = i*i) and
// prints "a + b = c" for each element.
int main()
{
    const int N = 10;
    const size_t bytes = N * sizeof(int);
    int a[N], b[N], c[N];
    // Fill host inputs.
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    int *dev_a, *dev_b, *dev_c;
    CUDA_FAILED(cudaMalloc((void **)&dev_a, bytes));
    CUDA_FAILED(cudaMalloc((void **)&dev_b, bytes));
    CUDA_FAILED(cudaMalloc((void **)&dev_c, bytes));
    CUDA_FAILED(cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice));
    CUDA_FAILED(cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice));
    // One single-thread block per element.
    add<<<N, 1>>>(dev_a, dev_b, dev_c);
    CUDA_FAILED(cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
81c422555d6b8977c9eddb7cc09c0f86d06d01ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
// Reports a failed HIP runtime call on stderr. Invoked through the
// cudaErrCheck macro, which supplies the call site's file and line.
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
   if (stat == hipSuccess) {
      return;
   }
   fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
// Reports a failed hipRAND call on stderr. Invoked through the
// curandErrCheck macro, which supplies the call site's file and line.
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
   if (stat == HIPRAND_STATUS_SUCCESS) {
      return;
   }
   fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// Must be multiples of 16 for wmma code to work
#define MATRIX_M (32)
#define MATRIX_N (8)
#define MATRIX_K (16)
const int WMMA_M =32;
const int WMMA_N =8;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef half ctype;
typedef half dtype;
typedef float host_type;
#define A_LAYOUT ROW_MAJOR
#define B_LAYOUT ROW_MAJOR
#define C_LAYOUT ROW_MAJOR
#define D_LAYOUT ROW_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
// Fill pattern used by initialize_matrix / get_value.
enum MatrixInitializationType{
   ZERO,      // every element 0
   ONE,       // every element 1
   RANDOM,    // rand() % randomRange per element
   IDENTITY,  // square matrices only (asserted in initialize_matrix)
   LINEAR     // 1, 2, 3, ... in traversal order
};
// Produces the next fill value for initialize_matrix. The value lives in a
// static counter so LINEAR can emit an increasing sequence across calls;
// pass RESET=true to zero the counter after finishing a matrix.
// randomRange bounds RANDOM values to [0, randomRange).
int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){
   static int val=0;
   switch(init_type){
      case ZERO:
         val=0;  // bug fix: previously relied on a prior RESET call to zero the counter
         break;
      case ONE:
         val=1;
         break;
      case RANDOM:
         val=rand()%randomRange;
         break;
      case LINEAR:
         val++;
         break;
      case IDENTITY:
         // bug fix: IDENTITY fills are produced positionally by
         // initialize_matrix; previously this hit the default case and
         // abort()ed when the trailing reset call passed IDENTITY through.
         break;
      default :
         printf("illegal MatrixInitializationType\n");
         abort();
         break;
   }
   if(RESET)
      val=0;
   return val;
}
// Prints a row_size x col_size matrix as "%.2f" values, one row per line
// terminated by ';'. `layout` selects ROW_MAJOR or COL_MAJOR element order.
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
   for(int row=0;row<row_size;row++){
      for(int col=0;col<col_size;col++){
         const int idx = (layout==ROW_MAJOR) ? (row*col_size+col) : (col*row_size+row);
         printf("%.2f ",static_cast<float>(matrix[idx]));
      }
      printf(";\n");
   }
}
// Fills a row_size x col_size matrix in the given layout with values chosen
// by init_type, resets get_value's fill counter, and prints the matrix.
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
   for(int row=0;row<row_size;row++){
      for(int col=0;col<col_size;col++){
         if(init_type==IDENTITY){
            assert(row_size==col_size);//only for square matrix can be used
            // bug fix: an identity matrix is 1 on the diagonal and 0
            // elsewhere; the old code set every element to 1. For a square
            // matrix the diagonal pattern is the same in both layouts.
            matrix[row*row_size+col]=static_cast<T>(row==col ? 1 : 0);
         }
         else{
            if(layout==ROW_MAJOR){
               matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
            }
            else{
               matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
            }
         }
      }
   }
   if(init_type!=IDENTITY){
      get_value(init_type,10,true);//reseting the val counter; IDENTITY never used it
   }
   print_matrix<T>(matrix,row_size,col_size,layout);
}
// Maps (row, col) to the linear index of an element in a row_size x col_size
// matrix stored in the given layout (ROW_MAJOR or COL_MAJOR).
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
   return (layout==ROW_MAJOR) ? (row*col_size+col) : (col*row_size+row);
}
// Naive dense GEMM on the host: result += A*B + C, where A is MxK, B is KxN,
// and C/result are MxN; each matrix may use its own storage layout. Callers
// pass a zero-initialized result. The result is printed afterwards.
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
   for(int row=0;row<M;row++){
      for(int col=0;col<N;col++){
         const int rindex=get_index(row,col,M,N,resultlayout);
         T acc=result_matrix[rindex];
         for(int k=0;k<K;k++){
            acc+=matrix_a[get_index(row,k,M,K,alayout)]*matrix_b[get_index(k,col,K,N,blayout)];
         }
         acc+=matrix_c[get_index(row,col,M,N,clayout)];
         result_matrix[rindex]=acc;
      }
   }
   print_matrix<T>(result_matrix,M,N,resultlayout);
}
// Element-wise comparison of two matrices; prints an error line for every
// mismatching (row, col). alayout/blayout give each matrix's storage layout.
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
   for(int row=0;row<row_size;row++){
      for(int col=0;col<col_size;col++){
         int index_a,index_b;
         index_a=get_index(row,col,row_size,col_size,alayout);
         index_b=get_index(row,col,row_size,col_size,blayout);//bug fix: was alayout, so mixed-layout comparisons indexed matrix_b wrongly
         if(matrix_a[index_a]!=matrix_b[index_b])
            printf("ERROR at index row=%d col=%d\n",row,col);
      }
   }
}
// Debug/demo kernel: one warp loads a single WMMA_M x WMMA_N x WMMA_K tile of
// A, B and C, prints every fragment element it owns, performs c = a*b + c
// with one tensor-core mma_sync, and stores the accumulator to d.
// Launched with one warp; the matrix dimensions equal the tile dimensions.
// NOTE: the fragment element-to-thread mapping is architecture specific, so
// the printed per-thread values are opaque.
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
    float t;  // scratch: fragment element widened to float for printf
    // Declare the fragments
    wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
    wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
    wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
    // Bounds checking
    wmma::load_matrix_sync(a_frag, a, A_STRIDE);
    wmma::load_matrix_sync(b_frag, b, B_STRIDE);
    wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
    // Dump each thread's slice of the input fragments.
    for(int i=0; i < a_frag.num_elements; i++) {
        t=static_cast<float>(a_frag.x[i]);
        printf("A_THREAD%d: %.2f \n",threadIdx.x,t);
    }
    for(int i=0; i < b_frag.num_elements; i++) {
        t=static_cast<float>(b_frag.x[i]);
        printf("B_THREAD%d: %.2f \n",threadIdx.x,t);
    }
    for(int i=0; i < c_frag.num_elements; i++) {
        t=static_cast<float>(c_frag.x[i]);
        printf("C_THREAD%d: %.2f \n",threadIdx.x,t);
    }
    wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);  // c_frag = a_frag * b_frag + c_frag
    wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
}
// Element-wise type-casting copy: out[i] = static_cast<T1>(in[i]) for i < n.
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    out[i] = in[i];
}
// End-to-end WMMA demo: builds A (MxK), B (KxN) and C (MxN) on the host,
// computes a CPU reference D = A*B + C, converts the inputs to the fragment
// element types on the device, runs wmma_example with a single warp, and
// reports the kernel time measured with events.
// NOTE(review): the "Results verified" message is printed unconditionally —
// the actual device/CPU comparison below is commented out.
int main(int argc, char* argv[]) {
    //data on device in host type format
    host_type *a_htype;
    host_type *b_htype;
    host_type *c_htype;
    host_type *d_htype;
    //data on device in gemm format
    atype *a_atype;
    btype *b_btype;
    ctype *c_ctype;
    dtype *d_dtype;
    srand(time(NULL));
    host_type *a_host_wmma;
    host_type *b_host_wmma;
    host_type *c_host_wmma;
    host_type *d_host_wmma;
    host_type *d_cal_host_wmma;  // CPU reference result
    hipEvent_t startWMMA;
    hipEvent_t stopWMMA;
    cudaErrCheck(hipEventCreate(&startWMMA));
    cudaErrCheck(hipEventCreate(&stopWMMA));
    // Use tensor cores
    cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
    cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
    cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
    cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
    cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
    cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
    cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
    cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
    a_host_wmma      = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
    b_host_wmma      = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
    c_host_wmma      = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
    d_host_wmma      = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
    d_cal_host_wmma  = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
    // Initialize inputs and compute the CPU reference.
    printf("a_host\n");
    initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR);
    printf("b_host\n");
    initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR);
    printf("c_host\n");
    initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR);
    printf("d_cal_host\n");
    initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
    printf("d_cal_host\n");
    matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
    // Upload host_type data, then convert it to the fragment element types.
    cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice));
    cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
    cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K);
    hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N);
    hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N);
    printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
    printf("Running with wmma...\n");
    // Time the WMMA kernel with events.
    cudaErrCheck(hipEventRecord(startWMMA));
    hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype);
    cudaErrCheck(hipEventRecord(stopWMMA));
    // Convert the result back to host_type and fetch it.
    hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N);
    cudaErrCheck(hipEventSynchronize(stopWMMA));
    // Error checking
    printf("\nChecking results...\n");
    cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost));
    printf("Results verified: cublas and WMMA agree.\n\n");
    float wmmaTime;
    cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
    printf("wmma took %.2fms\n", wmmaTime);
    cudaErrCheck(hipEventDestroy(startWMMA));
    cudaErrCheck(hipEventDestroy(stopWMMA));
    //printf("D_CALCULATED\n");
    //print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
    //printf("D_WMMA\n");
    //print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
    //printf("CHECKING\n");
    //compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
    cudaErrCheck(hipFree(a_htype));
    cudaErrCheck(hipFree(b_htype));
    cudaErrCheck(hipFree(c_htype));
    cudaErrCheck(hipFree(d_htype));
    cudaErrCheck(hipFree(a_atype));
    cudaErrCheck(hipFree(b_btype));
    cudaErrCheck(hipFree(c_ctype));
    cudaErrCheck(hipFree(d_dtype));
    free(a_host_wmma);
    free(b_host_wmma);
    free(c_host_wmma);
    free(d_host_wmma);
    free(d_cal_host_wmma);
    cudaErrCheck(hipDeviceReset());
    return 0;
}
| 81c422555d6b8977c9eddb7cc09c0f86d06d01ed.cu | #include <stdio.h>
#include <curand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
// Reports a failed CUDA runtime call on stderr. Invoked through the
// cudaErrCheck macro, which supplies the call site's file and line.
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
   if (stat == cudaSuccess) {
      return;
   }
   fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
// Reports a failed cuRAND call on stderr. Invoked through the
// curandErrCheck macro, which supplies the call site's file and line.
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
   if (stat == CURAND_STATUS_SUCCESS) {
      return;
   }
   fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// Must be multiples of 16 for wmma code to work
#define MATRIX_M (32)
#define MATRIX_N (8)
#define MATRIX_K (16)
const int WMMA_M =32;
const int WMMA_N =8;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef half ctype;
typedef half dtype;
typedef float host_type;
#define A_LAYOUT ROW_MAJOR
#define B_LAYOUT ROW_MAJOR
#define C_LAYOUT ROW_MAJOR
#define D_LAYOUT ROW_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
// Fill pattern used by initialize_matrix / get_value.
enum MatrixInitializationType{
   ZERO,      // every element 0
   ONE,       // every element 1
   RANDOM,    // rand() % randomRange per element
   IDENTITY,  // square matrices only (asserted in initialize_matrix)
   LINEAR     // 1, 2, 3, ... in traversal order
};
// Produces the next fill value for initialize_matrix. The value lives in a
// static counter so LINEAR can emit an increasing sequence across calls;
// pass RESET=true to zero the counter after finishing a matrix.
// randomRange bounds RANDOM values to [0, randomRange).
int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){
   static int val=0;
   switch(init_type){
      case ZERO:
         val=0;  // bug fix: previously relied on a prior RESET call to zero the counter
         break;
      case ONE:
         val=1;
         break;
      case RANDOM:
         val=rand()%randomRange;
         break;
      case LINEAR:
         val++;
         break;
      case IDENTITY:
         // bug fix: IDENTITY fills are produced positionally by
         // initialize_matrix; previously this hit the default case and
         // abort()ed when the trailing reset call passed IDENTITY through.
         break;
      default :
         printf("illegal MatrixInitializationType\n");
         abort();
         break;
   }
   if(RESET)
      val=0;
   return val;
}
// Prints a row_size x col_size matrix as "%.2f" values, one row per line
// terminated by ';'. `layout` selects ROW_MAJOR or COL_MAJOR element order.
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
   for(int row=0;row<row_size;row++){
      for(int col=0;col<col_size;col++){
         const int idx = (layout==ROW_MAJOR) ? (row*col_size+col) : (col*row_size+row);
         printf("%.2f ",static_cast<float>(matrix[idx]));
      }
      printf(";\n");
   }
}
// Fills a row_size x col_size matrix in the given layout with values chosen
// by init_type, resets get_value's fill counter, and prints the matrix.
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
   for(int row=0;row<row_size;row++){
      for(int col=0;col<col_size;col++){
         if(init_type==IDENTITY){
            assert(row_size==col_size);//only for square matrix can be used
            // bug fix: an identity matrix is 1 on the diagonal and 0
            // elsewhere; the old code set every element to 1. For a square
            // matrix the diagonal pattern is the same in both layouts.
            matrix[row*row_size+col]=static_cast<T>(row==col ? 1 : 0);
         }
         else{
            if(layout==ROW_MAJOR){
               matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
            }
            else{
               matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
            }
         }
      }
   }
   if(init_type!=IDENTITY){
      get_value(init_type,10,true);//reseting the val counter; IDENTITY never used it
   }
   print_matrix<T>(matrix,row_size,col_size,layout);
}
// Maps (row, col) to the linear index of an element in a row_size x col_size
// matrix stored in the given layout (ROW_MAJOR or COL_MAJOR).
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
   return (layout==ROW_MAJOR) ? (row*col_size+col) : (col*row_size+row);
}
// Naive dense GEMM on the host: result += A*B + C, where A is MxK, B is KxN,
// and C/result are MxN; each matrix may use its own storage layout. Callers
// pass a zero-initialized result. The result is printed afterwards.
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
   for(int row=0;row<M;row++){
      for(int col=0;col<N;col++){
         const int rindex=get_index(row,col,M,N,resultlayout);
         T acc=result_matrix[rindex];
         for(int k=0;k<K;k++){
            acc+=matrix_a[get_index(row,k,M,K,alayout)]*matrix_b[get_index(k,col,K,N,blayout)];
         }
         acc+=matrix_c[get_index(row,col,M,N,clayout)];
         result_matrix[rindex]=acc;
      }
   }
   print_matrix<T>(result_matrix,M,N,resultlayout);
}
// Element-wise comparison of two matrices; prints an error line for every
// mismatching (row, col). alayout/blayout give each matrix's storage layout.
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
   for(int row=0;row<row_size;row++){
      for(int col=0;col<col_size;col++){
         int index_a,index_b;
         index_a=get_index(row,col,row_size,col_size,alayout);
         index_b=get_index(row,col,row_size,col_size,blayout);//bug fix: was alayout, so mixed-layout comparisons indexed matrix_b wrongly
         if(matrix_a[index_a]!=matrix_b[index_b])
            printf("ERROR at index row=%d col=%d\n",row,col);
      }
   }
}
// Debug/demo kernel: one warp loads a single WMMA_M x WMMA_N x WMMA_K tile of
// A, B and C, prints every fragment element it owns, performs c = a*b + c
// with one tensor-core mma_sync, and stores the accumulator to d.
// Launched with one warp; the matrix dimensions equal the tile dimensions.
// NOTE: the fragment element-to-thread mapping is architecture specific, so
// the printed per-thread values are opaque.
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
    float t;  // scratch: fragment element widened to float for printf
    // Declare the fragments
    wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
    wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
    wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
    // Bounds checking
    wmma::load_matrix_sync(a_frag, a, A_STRIDE);
    wmma::load_matrix_sync(b_frag, b, B_STRIDE);
    wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
    // Dump each thread's slice of the input fragments.
    for(int i=0; i < a_frag.num_elements; i++) {
        t=static_cast<float>(a_frag.x[i]);
        printf("A_THREAD%d: %.2f \n",threadIdx.x,t);
    }
    for(int i=0; i < b_frag.num_elements; i++) {
        t=static_cast<float>(b_frag.x[i]);
        printf("B_THREAD%d: %.2f \n",threadIdx.x,t);
    }
    for(int i=0; i < c_frag.num_elements; i++) {
        t=static_cast<float>(c_frag.x[i]);
        printf("C_THREAD%d: %.2f \n",threadIdx.x,t);
    }
    wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);  // c_frag = a_frag * b_frag + c_frag
    wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
}
// Element-wise type-casting copy: out[i] = static_cast<T1>(in[i]) for i < n.
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    out[i] = in[i];
}
// End-to-end WMMA demo: builds A (MxK), B (KxN) and C (MxN) on the host,
// computes a CPU reference D = A*B + C, converts the inputs to the fragment
// element types on the device, runs wmma_example with a single warp, and
// reports the kernel time measured with events.
// NOTE(review): the "Results verified" message is printed unconditionally —
// the actual device/CPU comparison below is commented out.
int main(int argc, char* argv[]) {
    //data on device in host type format
    host_type *a_htype;
    host_type *b_htype;
    host_type *c_htype;
    host_type *d_htype;
    //data on device in gemm format
    atype *a_atype;
    btype *b_btype;
    ctype *c_ctype;
    dtype *d_dtype;
    srand(time(NULL));
    host_type *a_host_wmma;
    host_type *b_host_wmma;
    host_type *c_host_wmma;
    host_type *d_host_wmma;
    host_type *d_cal_host_wmma;  // CPU reference result
    cudaEvent_t startWMMA;
    cudaEvent_t stopWMMA;
    cudaErrCheck(cudaEventCreate(&startWMMA));
    cudaErrCheck(cudaEventCreate(&stopWMMA));
    // Use tensor cores
    cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
    cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
    cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
    cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
    cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
    cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
    cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
    cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
    a_host_wmma      = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
    b_host_wmma      = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
    c_host_wmma      = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
    d_host_wmma      = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
    d_cal_host_wmma  = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
    // Initialize inputs and compute the CPU reference.
    printf("a_host\n");
    initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR);
    printf("b_host\n");
    initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR);
    printf("c_host\n");
    initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR);
    printf("d_cal_host\n");
    initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
    printf("d_cal_host\n");
    matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
    // Upload host_type data, then convert it to the fragment element types.
    cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), cudaMemcpyHostToDevice));
    cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
    cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
    convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K);
    convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N);
    convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N);
    printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
    printf("Running with wmma...\n");
    // Time the WMMA kernel with events.
    cudaErrCheck(cudaEventRecord(startWMMA));
    wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype);
    cudaErrCheck(cudaEventRecord(stopWMMA));
    // Convert the result back to host_type and fetch it.
    convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N);
    cudaErrCheck(cudaEventSynchronize(stopWMMA));
    // Error checking
    printf("\nChecking results...\n");
    cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost));
    printf("Results verified: cublas and WMMA agree.\n\n");
    float wmmaTime;
    cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
    printf("wmma took %.2fms\n", wmmaTime);
    cudaErrCheck(cudaEventDestroy(startWMMA));
    cudaErrCheck(cudaEventDestroy(stopWMMA));
    //printf("D_CALCULATED\n");
    //print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
    //printf("D_WMMA\n");
    //print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
    //printf("CHECKING\n");
    //compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
    cudaErrCheck(cudaFree(a_htype));
    cudaErrCheck(cudaFree(b_htype));
    cudaErrCheck(cudaFree(c_htype));
    cudaErrCheck(cudaFree(d_htype));
    cudaErrCheck(cudaFree(a_atype));
    cudaErrCheck(cudaFree(b_btype));
    cudaErrCheck(cudaFree(c_ctype));
    cudaErrCheck(cudaFree(d_dtype));
    free(a_host_wmma);
    free(b_host_wmma);
    free(c_host_wmma);
    free(d_host_wmma);
    free(d_cal_host_wmma);
    cudaErrCheck(cudaDeviceReset());
    return 0;
}
|
50af0733f516df161c9cf65b28febb8f9bb1441b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<vector>
const int sharedMem = 256*sizeof(double);
// Block-wise sum reduction: each block sums its blockDim.x elements of `a`
// into out[blockIdx.x] using an interleaved-addressing shared-memory tree.
// NOTE(review): `sharedMem` is a byte count (256*sizeof(double)), so red_mat
// has more elements than the 256 actually used — confirm the intended size.
__global__ void redSum(double *a, double *out){
    __shared__ double red_mat[sharedMem];
    auto i = blockDim.x*blockIdx.x + threadIdx.x;
    red_mat[threadIdx.x] = a[i];
    __syncthreads();
    for(auto k = 1; k < blockDim.x; k*=2){
        auto index = 2*k*threadIdx.x; // Leads to shared memory bank conflicts
        if(index < blockDim.x){
            red_mat[index] += red_mat[index+k];
        }
        // bug fix: each tree level must complete before the next level reads
        // its partial sums; the original synchronized only after the loop,
        // which is a shared-memory race across iterations.
        __syncthreads();
    }
    if(threadIdx.x == 0){
        out[blockIdx.x] = red_mat[threadIdx.x];
    }
}
// Host driver: sums a vector of 32768 ones on the GPU in 256-element blocks.
// NOTE(review): d_out holds one partial sum per block; only h_out[0]
// (block 0's partial, 256 for this input) is printed — confirm intended.
int main(){
    int N = 32768;
    size_t size = N *sizeof(double);
    std::vector<double> h_a(N);
    std::vector<double> h_out(N, 0.0);
    // All-ones input, so each block's partial sum equals threadsPerBlock.
    for(auto i = 0; i < N; i++){
        h_a[i] = 1;
    }
    double *d_a, *d_out;
    hipMalloc(&d_a, size);
    hipMalloc(&d_out, size);
    hipMemcpy(d_a, h_a.data(), size, hipMemcpyHostToDevice);
    int threadsPerBlock = 256;
    int blocksPerGrid = N/threadsPerBlock;
    hipLaunchKernelGGL((redSum), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a, d_out);
    // Blocking copy on the default stream also synchronizes with the kernel.
    hipMemcpy(h_out.data(), d_out, size, hipMemcpyDeviceToHost);
    std::cout << h_out[0] << std::endl;
    hipFree(d_a);
    hipFree(d_out);
    return 0;
}
| 50af0733f516df161c9cf65b28febb8f9bb1441b.cu | #include<iostream>
#include<vector>
const int sharedMem = 256*sizeof(double);
// Block-wise sum reduction: each block sums its blockDim.x elements of `a`
// into out[blockIdx.x] using an interleaved-addressing shared-memory tree.
// NOTE(review): `sharedMem` is a byte count (256*sizeof(double)), so red_mat
// has more elements than the 256 actually used — confirm the intended size.
__global__ void redSum(double *a, double *out){
    __shared__ double red_mat[sharedMem];
    auto i = blockDim.x*blockIdx.x + threadIdx.x;
    red_mat[threadIdx.x] = a[i];
    __syncthreads();
    for(auto k = 1; k < blockDim.x; k*=2){
        auto index = 2*k*threadIdx.x; // Leads to shared memory bank conflicts
        if(index < blockDim.x){
            red_mat[index] += red_mat[index+k];
        }
        // bug fix: each tree level must complete before the next level reads
        // its partial sums; the original synchronized only after the loop,
        // which is a shared-memory race across iterations.
        __syncthreads();
    }
    if(threadIdx.x == 0){
        out[blockIdx.x] = red_mat[threadIdx.x];
    }
}
// Host driver: sums a vector of 32768 ones on the GPU in 256-element blocks.
// NOTE(review): d_out holds one partial sum per block; only h_out[0]
// (block 0's partial, 256 for this input) is printed — confirm intended.
int main(){
    int N = 32768;
    size_t size = N *sizeof(double);
    std::vector<double> h_a(N);
    std::vector<double> h_out(N, 0.0);
    // All-ones input, so each block's partial sum equals threadsPerBlock.
    for(auto i = 0; i < N; i++){
        h_a[i] = 1;
    }
    double *d_a, *d_out;
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_out, size);
    cudaMemcpy(d_a, h_a.data(), size, cudaMemcpyHostToDevice);
    int threadsPerBlock = 256;
    int blocksPerGrid = N/threadsPerBlock;
    redSum<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_out);
    // Blocking copy on the default stream also synchronizes with the kernel.
    cudaMemcpy(h_out.data(), d_out, size, cudaMemcpyDeviceToHost);
    std::cout << h_out[0] << std::endl;
    cudaFree(d_a);
    cudaFree(d_out);
    return 0;
}
|
18cb215f744a397573a3e1c2dd97e84087f16817.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "avg_pool3d_forward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Auto-generated benchmark driver: sweeps matrix sizes (matrices_) and
// launch configurations (blocks_) for avg_pool3d_forward and prints the
// time of 1000 launches per combination.
// NOTE(review): argv[1] (count of matrix sizes to test) is read without
// checking argc — confirm callers always pass it.
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
// Matrix dimensions and launch-block dimensions for this run.
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Fixed kernel arguments for the benchmark.
int B = 2;
int N = XSIZE*YSIZE;
int M = 2;
int C = 2;
int K = 1;
// Device buffers. NOTE(review): each allocation is XSIZE*YSIZE *bytes*
// (not elements), buffers are never initialized or freed, and hipMalloc
// is called on const-qualified pointers — this generated harness only
// measures launch latency, not correctness.
const int *nnIndex = NULL;
hipMalloc(&nnIndex, XSIZE*YSIZE);
const int *nnCount = NULL;
hipMalloc(&nnCount, XSIZE*YSIZE);
const float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
// Round the iteration space up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Warm-up: context init (hipFree(0)), one launch, then a sync.
hipFree(0);hipLaunchKernelGGL((
avg_pool3d_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, B,N,M,C,K,nnIndex,nnCount,input,output);
hipDeviceSynchronize();
// Ten more warm-up launches (not synchronized).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
avg_pool3d_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, B,N,M,C,K,nnIndex,nnCount,input,output);
}
// Timed section: 1000 launches. NOTE(review): no device sync before the
// final clock read, so this measures enqueue time rather than kernel time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
avg_pool3d_forward), dim3(gridBlock),dim3(threadBlock), 0, 0, B,N,M,C,K,nnIndex,nnCount,input,output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
// Output format: [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 18cb215f744a397573a3e1c2dd97e84087f16817.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "avg_pool3d_forward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Auto-generated benchmark driver: sweeps matrix sizes (matrices_) and
// launch configurations (blocks_) for avg_pool3d_forward and prints the
// time of 1000 launches per combination.
// NOTE(review): argv[1] (count of matrix sizes to test) is read without
// checking argc — confirm callers always pass it.
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
// Matrix dimensions and launch-block dimensions for this run.
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Fixed kernel arguments for the benchmark.
int B = 2;
int N = XSIZE*YSIZE;
int M = 2;
int C = 2;
int K = 1;
// Device buffers. NOTE(review): each allocation is XSIZE*YSIZE *bytes*
// (not elements), buffers are never initialized or freed, and cudaMalloc
// is called on const-qualified pointers — this generated harness only
// measures launch latency, not correctness.
const int *nnIndex = NULL;
cudaMalloc(&nnIndex, XSIZE*YSIZE);
const int *nnCount = NULL;
cudaMalloc(&nnCount, XSIZE*YSIZE);
const float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
// Round the iteration space up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Warm-up: context init (cudaFree(0)), one launch, then a sync.
cudaFree(0);
avg_pool3d_forward<<<gridBlock,threadBlock>>>(B,N,M,C,K,nnIndex,nnCount,input,output);
cudaDeviceSynchronize();
// Ten more warm-up launches (not synchronized).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
avg_pool3d_forward<<<gridBlock,threadBlock>>>(B,N,M,C,K,nnIndex,nnCount,input,output);
}
// Timed section: 1000 launches. NOTE(review): no device sync before the
// final clock read, so this measures enqueue time rather than kernel time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
avg_pool3d_forward<<<gridBlock,threadBlock>>>(B,N,M,C,K,nnIndex,nnCount,input,output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
// Output format: [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
12168e5f8ad917da8d239d9bac8527c467bdf96e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Single-thread kernel: stores the sum of the two device scalars through c.
__global__ void add(int *a, int *b, int *c){
    c[0] = a[0] + b[0];
}
extern "C" {
// Computes 2 + 7 on the GPU with a single-thread launch of add(),
// copies the result back to the host, and returns 0 unconditionally.
int test_add(void) {
    const int bytes = sizeof(int);
    int host_a = 2, host_b = 7, host_c = 0;

    // Device-side storage for the two operands and the result.
    int *dev_a, *dev_b, *dev_c;
    hipMalloc((void **)&dev_a, bytes);
    hipMalloc((void **)&dev_b, bytes);
    hipMalloc((void **)&dev_c, bytes);

    // Stage the inputs on the device.
    hipMemcpy(dev_a, &host_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, &host_b, bytes, hipMemcpyHostToDevice);

    // One block, one thread is enough for a scalar addition.
    hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, dev_a, dev_b, dev_c);

    // Fetch the result (the value is not inspected here).
    hipMemcpy(&host_c, dev_c, bytes, hipMemcpyDeviceToHost);
    (void)host_c;

    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
}
} | 12168e5f8ad917da8d239d9bac8527c467bdf96e.cu | __global__ void add(int *a, int *b, int *c){
// Single-thread kernel: stores the sum of the two device scalars into *c.
*c = *a + *b;
}
extern "C" {
// Computes 2 + 7 on the GPU with a single-thread launch of add(),
// copies the result back to the host, and returns 0 unconditionally.
int test_add(void) {
    const int bytes = sizeof(int);
    int host_a = 2, host_b = 7, host_c = 0;

    // Device-side storage for the two operands and the result.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, bytes);
    cudaMalloc((void **)&dev_b, bytes);
    cudaMalloc((void **)&dev_c, bytes);

    // Stage the inputs on the device.
    cudaMemcpy(dev_a, &host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, bytes, cudaMemcpyHostToDevice);

    // One block, one thread is enough for a scalar addition.
    add<<<1, 1>>>(dev_a, dev_b, dev_c);

    // Fetch the result (the value is not inspected here).
    cudaMemcpy(&host_c, dev_c, bytes, cudaMemcpyDeviceToHost);
    (void)host_c;

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
} |
355203b7e9a655b31aba5309b267785732e97623.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d2r-256-5-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
// AN5D-generated driver for a 2D box stencil (radius 2, 5x5 neighborhood).
// A1 is a double-buffered array viewed as A[2][dimsize][dimsize]; `timestep`
// iterations are run. If `scop` is true the GPU path is used (temporally
// tiled kernels kernel0_1..kernel0_5), otherwise a CPU/OpenMP reference
// loop runs. Returns wall-clock seconds elapsed.
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
// Error-checking macros for runtime API calls and kernel launches.
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
// Device copy of both time-buffers of A.
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Tiling parameters: c0 = time dimension, c1/c2 = spatial dimensions,
// halo of 2 in each spatial direction (matches BENCH_RAD).
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
// Steady state: advance 5 time steps per launch with kernel0_5.
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
// Remainder steps: pick kernel variants so the final buffer parity matches.
// This branch splits the leftover time steps across two launches.
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
// Remainder fits in a single launch: dispatch on leftover step count.
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
// Surface any launch error, then copy the result back and release memory.
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
// CPU/OpenMP reference: double-buffered 5x5 weighted box stencil.
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 355203b7e9a655b31aba5309b267785732e97623.cu | #include <assert.h>
#include <stdio.h>
#include "box2d2r-256-5-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 49
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.03125f * A[t%2][i-2][j-2] +
0.03126f * A[t%2][i-2][j-1] +
0.03127f * A[t%2][i-2][j] +
0.03128f * A[t%2][i-2][j+1] +
0.03129f * A[t%2][i-2][j+2] +
0.03130f * A[t%2][i-1][j-2] +
0.03131f * A[t%2][i-1][j-1] +
0.03132f * A[t%2][i-1][j] +
0.03133f * A[t%2][i-1][j+1] +
0.03134f * A[t%2][i-1][j+2] +
0.03135f * A[t%2][i][j-2] +
0.03136f * A[t%2][i][j-1] +
0.24712f * A[t%2][i][j] +
0.03138f * A[t%2][i][j+1] +
0.03139f * A[t%2][i][j+2] +
0.03140f * A[t%2][i+1][j-2] +
0.03141f * A[t%2][i+1][j-1] +
0.03142f * A[t%2][i+1][j] +
0.03143f * A[t%2][i+1][j+1] +
0.03144f * A[t%2][i+1][j+2] +
0.03145f * A[t%2][i+2][j-2] +
0.03146f * A[t%2][i+2][j-1] +
0.03147f * A[t%2][i+2][j] +
0.03148f * A[t%2][i+2][j+1] +
0.03149f * A[t%2][i+2][j+2];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
3b3e3b726620919ccde3a8780abbfc6c8c7965cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dev_edges.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
int len = 1;
int nrX_ = 1;
int nrY_ = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
dev_edges), dim3(gridBlock),dim3(threadBlock), 0, 0, data,len,nrX_,nrY_);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
dev_edges), dim3(gridBlock),dim3(threadBlock), 0, 0, data,len,nrX_,nrY_);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
dev_edges), dim3(gridBlock),dim3(threadBlock), 0, 0, data,len,nrX_,nrY_);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3b3e3b726620919ccde3a8780abbfc6c8c7965cf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dev_edges.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Micro-benchmark harness: for each requested matrix size (argv[1] = how many
// entries of matrices_ to sweep) and each of the 20 launch configurations in
// blocks_, warms up and then times 1000 launches of the dev_edges kernel,
// printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per configuration.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *data = NULL;
            // BUG FIX: allocate XSIZE*YSIZE *floats*, not bytes. The original
            // under-allocated by a factor of sizeof(float), so any kernel that
            // touches the full extent would access memory out of bounds.
            cudaMalloc(&data, (size_t)XSIZE * (size_t)YSIZE * sizeof(float));
            int len = 1;
            int nrX_ = 1;
            int nrY_ = 1;
            // Round the problem extents up to multiples of the block dims so
            // the grid division below is exact.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op free: forces lazy context creation before timing
            // Warm-up: one launch + sync, then 10 untimed launches.
            dev_edges<<<gridBlock, threadBlock>>>(data, len, nrX_, nrY_);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                dev_edges<<<gridBlock, threadBlock>>>(data, len, nrX_, nrY_);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                dev_edges<<<gridBlock, threadBlock>>>(data, len, nrX_, nrY_);
            }
            // BUG FIX: kernel launches are asynchronous; without this sync the
            // loop above only measured launch/queueing overhead, not execution.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: release the per-configuration buffer; the original
            // leaked it on every one of the (up to) 7 x 20 iterations.
            cudaFree(data);
        }
}} |
9f3b17a2b7de39821361f2752333e71a4b30ead4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
// Kernel to add two integers
// Device kernel: stores the sum of the two single-element inputs into *c.
// Expected launch: a single thread (<<<1,1>>>); every thread would write the
// same value, so extra threads are harmless but wasted.
__global__ void add(int *a, int *b, int *c){
    c[0] = a[0] + b[0];
}
// Main program
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = sizeof(int);
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate device memory
hipMalloc( (void**)&a_dev, size);
hipMalloc( (void**)&b_dev, size);
hipMalloc( (void**)&c_dev, size);
// Initialize
*a = 1;
*b = 2;
// Copy inputs to device
hipMemcpy( a_dev, a, size, hipMemcpyHostToDevice );
hipMemcpy( b_dev, b, size, hipMemcpyHostToDevice );
// Launch kernel on device
hipLaunchKernelGGL(( add) , dim3(1),dim3(1), 0, 0, a_dev,b_dev,c_dev);
// Copy device result back to host
hipMemcpy( c, c_dev, size, hipMemcpyDeviceToHost );
// Print result
printf("%d\n",*c);
// Free device memory
hipFree(a_dev);
hipFree(b_dev);
hipFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
}
| 9f3b17a2b7de39821361f2752333e71a4b30ead4.cu |
#include <stdio.h>
#include <stdlib.h>
// Kernel to add two integers
// Device kernel: stores the sum of the two single-element inputs into *c.
// Expected launch: a single thread (<<<1,1>>>); every thread would write the
// same value, so extra threads are harmless but wasted.
__global__ void add(int *a, int *b, int *c){
    c[0] = a[0] + b[0];
}
// Main program
// Host driver: adds two integers on the device and prints the result.
// Returns 0 on success, 1 if any allocation, copy, or kernel launch fails.
int main(void){
    int *a, *b, *c;              // Host copies
    int *a_dev, *b_dev, *c_dev;  // Device copies
    int size = sizeof(int);
    // Allocate host memory (checked: malloc can return NULL)
    a = (int *) malloc (size);
    b = (int *) malloc (size);
    c = (int *) malloc (size);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // Allocate device memory. Every CUDA call returns a status; the original
    // ignored them all, so a failure here would surface only as garbage output.
    if (cudaMalloc((void**)&a_dev, size) != cudaSuccess ||
        cudaMalloc((void**)&b_dev, size) != cudaSuccess ||
        cudaMalloc((void**)&c_dev, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    // Initialize
    *a = 1;
    *b = 2;
    // Copy inputs to device
    if (cudaMemcpy(a_dev, a, size, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(b_dev, b, size, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy host-to-device failed\n");
        return 1;
    }
    // Launch kernel on device; launch errors are only reported via
    // cudaGetLastError(), not by the launch itself.
    add <<<1,1>>> (a_dev,b_dev,c_dev);
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "kernel launch failed\n");
        return 1;
    }
    // Copy device result back to host (a blocking copy, so it also
    // synchronizes with the kernel)
    if (cudaMemcpy(c, c_dev, size, cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy device-to-host failed\n");
        return 1;
    }
    // Print result
    printf("%d\n",*c);
    // Free device memory
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);
    // Free host memory
    free(a);
    free(b);
    free(c);
    return 0;
}
|
9bfeec88e341df7e10fe5c18adf8bce50005c5d4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <metrics/trustworthiness.cuh>
#include <raft/cuda_utils.cuh>
#include <vector>
using namespace MLCommon;
using namespace ML::Metrics;
class TrustworthinessScoreTest : public ::testing::Test {
protected:
void basicTest() {
std::vector<float> X = {
5.6142087, 8.59787, -4.382763, -3.6452143, -5.8816037,
-0.6330313, 4.6920023, -0.79210913, 0.6106314, 2.1210914,
5.919943, -8.43784, -6.4819884, 0.41001374, -6.1052523,
-4.0825715, -5.314755, -2.834671, 5.751696, -6.5012555,
-0.4719201, -7.53353, 7.6789393, -1.4959852, -5.5977287,
-9.564147, 1.2902534, 3.559834, -6.7659483, 8.265964,
4.595404, 9.133477, -6.1553917, -6.319754, -2.9039452,
4.4150834, -3.094395, -4.426273, 9.584571, -5.64133,
6.6209483, 7.4044604, 3.9620576, 5.639907, 10.33007,
-0.8792053, 5.143776, -7.464049, 1.2448754, -5.6300974,
5.4518576, 4.119535, 6.749645, 7.627064, -7.2298336,
1.9681473, -6.9083176, 6.404673, 0.07186685, 9.0994835,
8.51037, -8.986389, 0.40534487, 2.115397, 4.086756,
1.2284287, -2.6272132, 0.06527536, -9.587425, -7.206078,
7.864875, 7.4397306, -6.9233336, -2.6643622, 3.3466153,
7.0408177, -3.6069896, -9.971769, 4.4075623, 7.9063697,
2.559074, 4.323717, 1.6867131, -1.1576937, -9.893141,
-3.251416, -7.4889135, -4.0588717, -2.73338, -7.4852257,
3.4460473, 9.759119, -5.4680476, -4.722435, -8.032619,
-1.4598992, 4.227361, 3.135568, 1.1950601, 1.1982028,
6.998856, -6.131138, -6.6921015, 0.5361224, -7.1213965,
-5.6104236, -7.2212887, -2.2710054, 8.544764, -6.0254574,
1.4582269, -5.5587835, 8.031556, -0.26328218, -5.2591386,
-9.262641, 2.8691363, 5.299787, -9.209455, 8.523085,
5.180329, 10.655528, -5.7171874, -6.7739563, -3.6306462,
4.067106, -1.5912259, -3.2345476, 8.042973, -3.6364832,
4.1242137, 9.886953, 5.4743724, 6.3058076, 9.369645,
-0.5175337, 4.9859877, -7.879498, 1.358422, -4.147944,
3.8984218, 5.894656, 6.4903927, 8.702036, -8.023722,
2.802145, -7.748032, 5.8461113, -0.34215945, 11.298865,
1.4107164, -9.949621, -1.6257563, -10.655836, 2.4528909,
1.1570255, 5.170669, 2.8398793, 7.1838694, 9.088459,
2.631155, 3.964414, 2.8769252, 0.04198391, -0.16993195,
3.6747139, -2.8377378, 6.1782537, 10.759618, -4.5642614,
-8.522967, 0.8614642, 6.623416, -1.029324, 5.5488334,
-7.804511, 2.128833, 7.9042315, 7.789576, -2.7944536,
0.72271067, -10.511495, -0.78634536, -10.661714, 2.9376361,
1.9148129, 6.22859, 0.26264945, 8.028384, 6.8743043,
0.9351067, 7.0690722, 4.2846055, 1.4134506, -0.18144785,
5.2778087, -1.7140163, 9.217541, 8.602799, -2.6537218,
-7.8377395, 1.1244944, 5.4540544, -0.38506773, 3.9885726,
-10.76455, 1.4440702, 9.136163, 6.664117, -5.7046547,
8.038592, -9.229767, -0.2799413, 3.6064725, 4.187257,
1.0516582, -2.0707326, -0.7615968, -8.561018, -3.7831352,
10.300297, 5.332594, -6.5880876, -4.2508664, 1.7985519,
5.7226253, -4.1223383, -9.6697855, 1.4885283, 7.524974,
1.7206005, 4.890457, 3.7264557, 0.4428284, -9.922455,
-4.250455, -6.4410596, -2.107994, -1.4109765, -6.1325397,
0.32883006, 6.0489736, 7.7257385, -8.281174, 1.0129383,
-10.792166, 8.378851, 10.802716, 9.848448, -9.188757,
1.3151443, 1.9971865, -2.521849, 4.3268294, -7.775683,
-2.2902298, 3.0824065, -7.17559, 9.6100855, 7.3965735,
-10.476525, 5.895973, -3.6974669, -7.6688933, 1.7354839,
-7.4045196, -1.7992063, -4.0394845, 5.2471714, -2.250571,
2.528036, -8.343515, -2.2374575, -10.019771, 0.73371273,
3.1853926, 2.7994921, 2.6637669, 7.620401, 7.515571,
0.68636256, 5.834537, 4.650282, -1.0362619, 0.4461701,
3.7870514, -4.1340904, 7.202998, 9.736904, -3.005512,
-8.920467, 1.1228397, 6.2598724, 1.2812365, 4.5442104,
-8.791537, 0.92113096, 8.464749, 8.359035, -4.3923397,
1.2252625, -10.1986475, -1.4409319, -10.013967, 3.9071581,
1.683064, 4.877419, 1.6570637, 9.559105, 7.3546534,
0.36635467, 5.220211, 4.6303267, 0.6601065, 0.16149978,
3.8818731, -3.4438233, 8.42085, 8.659159, -3.0935583,
-8.039611, 2.3060374, 5.134666, 1.0458113, 6.0190983,
-9.143728, 0.99048865, 9.210842, 6.670241, -5.9614363,
0.8747396, 7.078824, 8.067469, -10.314754, 0.45977542,
-9.28306, 9.1838665, 9.318644, 7.189082, -11.092555,
1.0320464, 3.882163, 0.10953151, 7.9029684, -6.9068265,
-1.3526366, 5.3996363, -8.430931, 11.452577, 6.39663,
-11.090514, 4.6662245, -3.1268113, -8.357452, 2.2276728,
-10.357126, -0.9291848, -3.4193344, 3.1289792, -2.5030103,
6.772719, 11.457757, -4.2125936, -6.684548, -4.7611327,
3.6960156, -2.3030636, -3.0591488, 10.452471, -4.1267314,
5.66614, 7.501461, 5.072407, 6.636537, 8.990381,
-0.2559256, 4.737867, -6.2149944, 2.535682, -5.5484023,
5.7113924, 3.4742818, 7.9915137, 7.0052586, -7.156467,
1.4354781, -8.286235, 5.7523417, -2.4175215, 9.678009,
0.05066403, -9.645226, -2.2658763, -9.518178, 4.493372,
2.3232365, 2.1659086, 0.42507997, 8.360246, 8.23535,
2.6878164, 5.236947, 3.4924245, -0.6089895, 0.8884741,
4.359464, -4.6073823, 7.83441, 8.958755, -3.4690795,
-9.182282, 1.2478025, 5.6311107, -1.2408862, 3.6316886,
-8.684654, 2.1078515, 7.2813864, 7.9265943, -3.6135032,
0.4571511, 8.493568, 10.496853, -7.432897, 0.8625995,
-9.607528, 7.2899456, 8.83158, 8.908199, -10.300263,
1.1451302, 3.7871468, -0.97040755, 5.7664757, -8.9688,
-2.146672, 5.9641485, -6.2908535, 10.126465, 6.1553903,
-12.066902, 6.301596, -5.0419583, -8.228695, 2.4879954,
-8.918582, -3.7434099, -4.1593685, 3.7431836, -1.1704745,
0.5524103, 9.109399, 9.571567, -11.209955, 1.2462777,
-9.554555, 9.091726, 11.477966, 7.630937, -10.450911,
1.9205878, 5.358983, -0.44546837, 6.7611346, -9.74753,
-0.5939732, 3.8892255, -6.437991, 10.294727, 5.6723895,
-10.7883, 6.192348, -5.293862, -10.811491, 1.0194173,
-7.074576, -3.192368, -2.5231771, 4.2791643, -0.53309685,
0.501366, 9.636625, 7.710316, -6.4219728, 1.0975566,
-8.218886, 6.9011984, 9.873679, 8.903804, -9.316832,
1.2404599, 4.9039655, 1.2272617, 4.541515, -5.2753224,
-3.2196746, 3.1303136, -7.285681, 9.041425, 5.6417427,
-9.93667, 5.7548947, -5.113397, -8.544622, 4.182665,
-7.7709813, -3.2810235, -3.312072, 3.8900535, -2.0604856,
6.709082, -8.461194, 1.2666026, 4.8770437, 2.6955879,
3.0340345, -1.1614609, -3.536341, -7.090382, -5.36146,
9.072544, 6.4554095, -4.4728956, -1.88395, 3.1095037,
8.782348, -3.316743, -8.65248, 1.6802986, 8.186188,
2.1783829, 4.931278, 4.158475, 1.4033595, -11.320101,
-3.7084908, -6.740436, -2.5555193, -1.0451177, -6.5569925,
0.82810307, 8.505919, 8.332857, -9.488569, -0.21588463,
-8.056692, 8.493993, 7.6401625, 8.812983, -9.377281,
2.4369764, 3.1766508, 0.6300803, 5.6666765, -7.913654,
-0.42301777, 4.506412, -7.8954244, 10.904591, 5.042256,
-9.626183, 8.347351, -3.605006, -7.923387, 1.1024277,
-8.705793, -2.5151258, -2.5066147, 4.0515003, -2.060757,
6.2635093, 8.286584, -6.0509276, -6.76452, -3.1158175,
1.6578803, -1.4608748, -1.24211, 8.151246, -4.2970877,
6.093071, 7.4911637, 4.51018, 4.8425875, 9.211085,
-2.4386222, 4.5830803, -5.6079445, 2.3713675, -4.0707507,
3.1787417, 5.462342, 6.915912, 6.3928423, -7.2970796,
5.0112796, -9.140893, 4.9990606, 0.38391754, 7.7088532,
1.9340848, 8.18833, 8.16617, -9.42086, -0.3388326,
-9.659727, 8.243045, 8.099073, 8.439428, -7.038694,
2.1077902, 3.3866816, -1.9975324, 7.4972878, -7.2525196,
-1.553731, 4.08758, -6.6922374, 9.50525, 4.026735,
-9.243538, 7.2740564, -3.9319072, -6.3228955, 1.6693478,
-7.923119, -3.7423058, -2.2813146, 5.3469067, -1.8285407,
3.3118162, 8.826356, -4.4641976, -6.4751124, -9.200089,
-2.519147, 4.225298, 2.4105988, -0.4344186, 0.53441775,
5.2836394, -8.2816105, -4.996147, -1.6870759, -7.8543897,
-3.9788852, -7.0346904, -3.1289773, 7.4567637, -5.6227813,
1.0709786, -8.866012, 8.427324, -1.1755563, -5.789216,
-8.197835, 5.3342214, 6.0646234, -6.8975716, 7.717031,
3.480355, 8.312151, -3.6645212, -3.0976524, -8.090359,
-1.9176173, 2.4257212, 1.9700835, 0.4098958, 2.1341088,
7.652741, -9.9595585, -5.989757, 0.10119354, -7.935407,
-5.792786, -5.22783, -4.318978, 5.414037, -6.4621663,
1.670883, -6.9224787, 8.696932, -2.0214002, -6.6681314,
-8.326418, 4.9049683, 5.4442496, -6.403739, 7.5822453,
7.0972915, -9.072851, -0.23897195, 1.7662339, 5.3096304,
1.983179, -2.222645, -0.34700772, -9.094717, -6.107907,
9.525174, 8.1550665, -5.6940084, -4.1636486, 1.7360662,
8.528821, -3.7299833, -9.341266, 2.608542, 9.108706,
0.7978509, 4.2488184, 2.454484, 0.9446999, -10.106636,
-3.8973773, -6.6566644, -4.5647273, -0.99837756, -6.568582,
9.324853, -7.9020953, 2.0910501, 2.2896829, 1.6790711,
1.3159255, -3.5258796, 1.8898442, -8.105812, -4.924962,
8.771129, 7.1202874, -5.991957, -3.4106019, 2.4450088,
7.796387, -3.055946, -7.8971434, 1.9856719, 9.001636,
1.8511922, 3.019749, 3.1227696, 0.4822102, -10.021213,
-3.530504, -6.225959, -3.0029628, -1.7881511, -7.3879776,
1.3925704, 9.499782, -3.7318087, -3.7074296, -7.7466836,
-1.5284524, 4.0535855, 3.112011, 0.10340207, -0.5429599,
6.67026, -9.155924, -4.924038, 0.64248866, -10.0103655,
-3.2742946, -4.850029, -3.6707063, 8.586258, -5.855605,
4.906918, -6.7813993, 7.9938135, -2.5473144, -5.688948,
-7.822478, 2.1421318, 4.66659, -9.701272, 9.549149,
0.8998125, -8.651497, -0.56899565, -8.639817, 2.3088377,
2.1264515, 3.2764478, 2.341989, 8.594338, 8.630639,
2.8440373, 6.2043204, 4.433932, 0.6320018, -1.8179281,
5.09452, -1.5741565, 8.153934, 8.744339, -3.6945698,
-8.883078, 1.5329908, 5.2745943, 0.44716078, 4.8809066,
-7.9594903, 1.134374, 9.233994, 6.5528665, -4.520542,
9.477355, -8.622195, -0.23191702, 2.0485356, 3.9379985,
1.5916302, -1.4516805, -0.0843819, -7.8554378, -5.88308,
7.999766, 6.2572145, -5.585321, -4.0097756, 0.42382592,
6.160884, -3.631315, -8.333449, 2.770595, 7.8495173,
3.3331623, 4.940415, 3.6207345, -0.037517, -11.034698,
-3.185103, -6.614664, -3.2177854, -2.0792234, -6.8879867,
7.821685, -8.455084, 1.0784642, 4.0033927, 2.7343264,
2.6052725, -4.1224284, -0.89305353, -6.8267674, -4.9715133,
8.880253, 5.6994023, -5.9695024, -4.9181266, 1.3017995,
7.972617, -3.9452884, -10.424556, 2.4504194, 6.21529,
0.93840516, 4.2070026, 6.159839, 0.91979957, -8.706724,
-4.317946, -6.6823545, -3.0388, -2.464262, -7.3716645,
1.3926703, 6.544412, -5.6251183, -5.122411, -8.622049,
-2.3905911, 3.9138813, 1.9779967, -0.05011125, 0.13310997,
7.229751, -9.742043, -8.08724, 1.2426697, -7.9230795,
-3.3162494, -7.129571, -3.5488048, 7.4701195, -5.2357526,
0.5917681, -6.272206, 6.342328, -2.909731, -4.991607,
-8.845513, 3.3228495, 7.033246, -7.8180246, 8.214469,
6.3910093, 9.185153, -6.20472, -7.713809, -3.8481297,
3.5579286, 0.7078448, -3.2893546, 7.384514, -4.448121,
3.0104196, 9.492943, 8.024847, 4.9114385, 9.965594,
-3.014036, 5.182494, -5.8806014, 2.5312455, -5.9926524,
4.474469, 6.3717875, 6.993105, 6.493093, -8.935534,
3.004074, -8.055647, 8.315765, -1.3026813, 8.250377,
0.02606229, 6.8508425, 9.655665, -7.0116496, -0.41060972,
-10.049198, 7.897801, 6.7791023, 8.3362, -9.821014,
2.491157, 3.5160472, -1.6228812, 7.398063, -8.769123,
-3.1743705, 3.2827861, -6.497855, 10.831924, 5.2761307,
-9.704417, 4.3817043, -3.9841619, -8.111647, 1.1883026,
-8.115312, -2.9240117, -5.8879666, 4.20928, -0.3587938,
6.935672, -10.177582, 0.48819053, 3.1250648, 2.9306343,
3.082544, -3.477687, -1.3768549, -7.4922366, -3.756631,
10.039836, 3.6670392, -5.9761434, -4.4728765, 3.244255,
7.027899, -2.3806512, -10.4100685, 1.605716, 7.7953773,
0.5408159, 1.7156523, 3.824097, -1.0604783, -10.142124,
-5.246805, -6.5283823, -4.579547, -2.42714, -6.709197,
2.7782338, 7.33353, -6.454507, -2.9929368, -7.8362985,
-2.695445, 2.4900775, 1.6682367, 0.4641757, -1.0495365,
6.9631333, -9.291356, -8.23837, -0.34263706, -8.275113,
-2.8454232, -5.0864096, -2.681942, 7.5450225, -6.2517986,
0.06810654, -6.470652, 4.9042645, -1.8369255, -6.6937943,
-7.9625087, 2.8510258, 6.180508, -8.282598, 7.919079,
1.4897474, 6.7217417, -4.2459426, -4.114431, -8.375707,
-2.143264, 5.6972933, 1.5574739, 0.39375135, 1.7930849,
5.1737595, -7.826241, -5.160268, -0.80433255, -7.839536,
-5.2620406, -5.4643164, -3.185536, 6.620315, -7.065227,
1.0524757, -6.125088, 5.7126627, -1.6161644, -3.852159,
-9.164279, 2.7005782, 5.946544, -8.468236, 8.2145405,
1.1035942, 6.590157, -4.0461283, -4.8090615, -7.6702685,
-2.1121511, 5.1147075, 1.6128504, 2.0064135, 1.0544407,
6.0038295, -7.8282537, -4.801278, 0.32349443, -8.0649805,
-4.372714, -5.61336, -5.21394, 8.176595, -5.4753284,
1.7800134, -8.267283, 7.2133374, -0.16594432, -6.317046,
-9.490406, 4.1261597, 5.473317, -7.7551675, 7.007468,
7.478628, -8.801905, 0.10975724, 3.5478222, 4.797803,
1.3825226, -3.357369, 0.99262005, -6.94877, -5.4781394,
9.632604, 5.7492557, -5.9014316, -3.1632116, 2.340859,
8.708098, -3.1255999, -8.848661, 4.5612836, 8.455157,
0.73460823, 4.112301, 4.392744, -0.30759293, -6.8036823,
-3.0331545, -8.269506, -2.82415, -0.9411246, -5.993506,
2.1618164, -8.716055, -0.7432543, -10.255819, 3.095418,
2.5131428, 4.752442, 0.9907621, 7.8279433, 7.85814,
0.50430876, 5.2840405, 4.457291, 0.03330028, -0.40692952,
3.9244103, -2.117118, 7.6977615, 8.759009, -4.2157164,
-9.136053, 3.247858, 4.668686, 0.76162136, 5.3833632,
-9.231471, 0.44309422, 8.380872, 6.7211227, -3.091507,
2.173508, -9.038242, -1.3666698, -9.819077, 0.37825826,
2.3898845, 4.2440815, 1.9161536, 7.24787, 6.9124637,
1.6238527, 5.1140285, 3.1935842, 1.02845, -1.1273454,
5.638998, -2.497932, 8.342559, 8.586319, -2.9069402,
-7.6387944, 3.5975037, 4.4115705, 0.41506064, 4.9078383,
-9.68327, 1.8159529, 9.744613, 8.40622, -4.495336,
9.244892, -8.789869, 1.3158468, 4.018167, 3.3922846,
2.652022, -2.7495477, 0.2528986, -8.268324, -6.004913,
10.428784, 6.6580734, -5.537176, -1.7177434, 2.7504628,
6.7735, -2.4454272, -9.998361, 2.9483433, 6.8266654,
2.3787718, 4.472637, 2.5871701, 0.7355365, -7.7027745,
-4.1879907, -7.172832, -4.1843605, -0.03646783, -5.419406,
6.958486, 11.011111, -7.1821184, -7.956423, -3.408451,
4.6850276, -2.348787, -4.398289, 6.9787564, -3.8324208,
5.967827, 8.433518, 4.660108, 5.5657144, 9.964243,
-1.3515275, 6.404833, -6.4805903, 2.4379845, -6.0816774,
1.752272, 5.3771873, 6.9613523, 6.9788294, -6.3894596,
3.7521114, -6.8034263, 6.4458385, -0.7233525, 10.512529,
4.362273, 9.231461, -6.3382263, -7.659, -3.461823,
4.71463, 0.17817476, -3.685746, 7.2962036, -4.6489477,
5.218017, 11.546999, 4.7218375, 6.8498397, 9.281103,
-3.900459, 6.844054, -7.0886965, -0.05019227, -8.233724,
5.5808983, 6.374517, 8.321048, 7.969449, -7.3478637,
1.4917561, -8.003144, 4.780668, -1.1981848, 7.753739,
2.0260844, -8.880096, -3.4258451, -7.141975, 1.9637157,
1.814725, 5.311151, 1.4831505, 7.8483663, 7.257948,
1.395786, 6.417756, 5.376912, 0.59505713, 0.00062552,
3.6634305, -4.159713, 7.3571978, 10.966816, -2.5419605,
-8.466229, 1.904205, 5.6338267, -0.52567476, 5.59736,
-8.361799, 0.5009981, 8.460681, 7.3891273, -3.5272243,
5.0552278, 9.921456, -7.69693, -7.286378, -1.9198836,
3.1666567, -2.5832257, -2.2445817, 9.888111, -5.076563,
5.677401, 7.497946, 5.662994, 5.414262, 8.566503,
-2.5530663, 7.1032815, -6.0612082, 1.3419591, -4.9595256,
4.3377542, 4.3790717, 6.793512, 8.383502, -7.1278043,
3.3240774, -9.379446, 6.838661, -0.81241214, 8.694813,
0.79141915, 7.632467, 8.575382, -8.533798, 0.28954387,
-7.5675836, 5.8653326, 8.97235, 7.1649346, -10.575289,
0.9359381, 5.02381, -0.5609511, 5.543464, -7.69131,
-2.1792977, 2.4729247, -6.1917787, 10.373678, 7.6549597,
-8.809486, 5.5657206, -3.3169382, -8.042887, 2.0874746,
-7.079005, -3.33398, -3.6843317, 4.0172358, -2.0754814,
1.1726758, 7.4618697, 6.9483604, -8.469206, 0.7401797,
-10.318176, 8.384557, 10.5476265, 9.146971, -9.250223,
0.6290606, 4.4941425, -0.7514017, 7.2271705, -8.309598,
-1.4761636, 4.0140634, -6.021102, 9.132852, 5.6610966,
-11.249811, 8.359293, -1.9445792, -7.7393436, -0.3931331,
-8.824441, -2.5995944, -2.5714035, 4.140213, -3.6863053,
5.517265, 9.020411, -4.9286127, -7.871219, -3.7446704,
2.5179656, -1.4543481, -2.2703636, 7.010597, -3.6436229,
6.753862, 7.4129915, 7.1406755, 5.653706, 9.5445175,
0.15698843, 4.761813, -7.698002, 1.6870106, -4.5410123,
4.171763, 5.3747005, 6.341021, 7.456738, -8.231657,
2.763487, -9.208167, 6.676799, -1.1957736, 10.062605,
4.0975976, 7.312957, -2.4981596, -2.9658387, -8.150425,
-2.1075552, 2.64375, 1.6636052, 1.1483809, 0.09276015,
5.8556347, -7.8481026, -5.9913163, -0.02840613, -9.937289,
-1.0486673, -5.2340155, -3.83912, 7.7165728, -8.409944,
0.80863273, -6.9119215, 7.5712357, 0.36031485, -6.056131,
-8.470033, 1.8678337, 3.0121377, -7.3096333, 8.205484,
5.262654, 8.774514, -4.7603083, -7.2096143, -4.437014,
3.6080024, -1.624254, -4.2787876, 8.880863, -4.8984556,
5.1782074, 9.944454, 3.911282, 3.5396595, 8.867042,
-1.2006199, 5.393288, -5.6455317, 0.7829499, -4.0338907,
2.479272, 6.5080743, 8.582535, 7.0097537, -6.9823785,
3.984318, -7.225381, 5.3135114, -1.0391048, 8.951443,
-0.70119005, -8.510742, -0.42949116, -10.9224825, 2.8176029,
1.6800792, 5.778404, 1.7269998, 7.1975236, 7.7258267,
2.7632928, 5.3399253, 3.4650044, 0.01971426, -1.6468811,
4.114996, -1.5110453, 6.8689218, 8.269899, -3.1568048,
-7.0344677, 1.2911975, 5.950357, 0.19028673, 4.657226,
-8.199647, 2.246055, 8.989509, 5.3101015, -4.2400866};
std::vector<float> X_embedded = {
-0.41849962, -0.53906363, 0.46958843, -0.35832694, -0.23779503,
-0.29751351, -0.01072748, -0.21353109, -0.54769957, -0.55086273,
0.37093949, -0.12714292, -0.06639574, -0.36098689, -0.13060696,
-0.07362658, -1.01205945, -0.39285606, 0.2864089, -0.32031146,
-0.19595343, 0.08900568, -0.04813879, -0.06563424, -0.42655188,
-0.69014251, 0.51459783, -0.1942696, -0.07767916, -0.6119386,
0.04813685, -0.22557008, -0.56890118, -0.60293794, 0.43429622,
-0.09240723, -0.00624062, -0.25800395, -0.1886092, 0.01655941,
-0.01961523, -0.14147359, 0.41414487, -0.8512944, -0.61199242,
-0.18586016, 0.14024924, -0.41635606, -0.02890144, 0.1065347,
0.39700791, -1.14060664, -0.95313865, 0.14416681, 0.17306046,
-0.53189689, -0.98987544, -0.67918193, 0.41787854, -0.20878236,
-0.06612862, 0.03502904, -0.03765266, -0.0980606, -0.00971657,
0.29432917, 0.36575687, -1.1645509, -0.89094597, 0.03718805,
0.2310573, -0.38345811, -0.10401925, -0.10653082, 0.38469055,
-0.88302094, -0.80197543, 0.03548668, 0.02775662, -0.54374295,
0.03379983, 0.00923623, 0.29320273, -1.05263519, -0.93360096,
0.03778313, 0.12360487, -0.56437284, 0.0644429, 0.33432651,
0.36450726, -1.22978747, -0.83822101, -0.18796451, 0.34888434,
-0.3801491, -0.45327303, -0.59747899, 0.39697698, -0.15616602,
-0.06159166, -0.40301991, -0.11725303, -0.11913263, -0.12406619,
-0.11227967, 0.43083835, -0.90535849, -0.81646025, 0.10012121,
-0.0141237, -0.63747931, 0.04805023, 0.34190539, 0.50725192,
-1.17861414, -0.74641538, -0.09333111, 0.27992678, -0.56214809,
0.04970971, 0.36249384, 0.57705611, -1.16913795, -0.69849908,
0.10957897, 0.27983218, -0.62088525, 0.0410459, 0.23973398,
0.40960434, -1.14183664, -0.83321381, 0.02149482, 0.21720445,
-0.49869928, -0.95655465, -0.51680422, 0.45761383, -0.08351214,
-0.12151554, 0.00819737, -0.20813803, -0.01055793, 0.25319234,
0.36154974, 0.1822421, -1.15837133, -0.92209691, -0.0501582,
0.08535917, -0.54003763, -1.08675635, -1.04009593, 0.09408128,
0.07009826, -0.01762833, -0.19180447, -0.18029785, -0.20342001,
0.04034991, 0.1814747, 0.36906669, -1.13532007, -0.8852452,
0.0782818, 0.16825101, -0.50301319, -0.29128098, -0.65341312,
0.51484352, -0.38758236, -0.22531103, -0.55021971, 0.10804344,
-0.3521522, -0.38849035, -0.74110794, 0.53761131, -0.25142813,
-0.1118066, -0.47453368, 0.06347904, -0.23796193, -1.02682328,
-0.47594091, 0.39515916, -0.2782529, -0.16566519, 0.08063579,
0.00810116, -0.06213913, -1.059654, -0.62496334, 0.53698546,
-0.11806234, 0.00356161, 0.11513405, -0.14213292, 0.04102662,
-0.36622161, -0.73686272, 0.48323864, -0.27338892, -0.14203401,
-0.41736352, 0.03332564, -0.21907479, -0.06396769, 0.01831361,
0.46263444, -1.01878166, -0.86486858, 0.17622118, -0.01249686,
-0.74530888, -0.9354887, -0.5027945, 0.38170099, -0.15547098,
0.00677824, -0.04677663, -0.13541745, 0.07253501, -0.97933143,
-0.58001202, 0.48235369, -0.18836913, -0.02430783, 0.07572441,
-0.08101331, 0.00630076, -0.16881248, -0.67989182, 0.46083611,
-0.43910736, -0.29321918, -0.38735861, 0.07669903, -0.29749861,
-0.40047669, -0.56722462, 0.33168188, -0.13118173, -0.06672747,
-0.56856316, -0.26269144, -0.14236671, 0.10651901, 0.4962585,
0.38848072, -1.06653547, -0.64079332, -0.47378591, 0.43195483,
-0.04856951, -0.9840439, -0.70610428, 0.34028092, -0.2089237,
-0.05382041, 0.01625874, -0.02080803, -0.12535211, -0.04146428,
-1.24533033, 0.48944879, 0.0578458, 0.26708388, -0.90321028,
0.35377088, -0.36791429, -0.35382384, -0.52748734, 0.42854419,
-0.31744713, -0.19174226, -0.39073724, -0.03258846, -0.19978228,
-0.36185205, -0.57412046, 0.43681973, -0.25414538, -0.12904905,
-0.46334973, -0.03123853, -0.11303604, -0.87073672, -0.45441297,
0.41825858, -0.25303507, -0.21845073, 0.10248682, -0.11045569,
-0.10002795, -0.00572806, 0.16519061, 0.42651513, -1.11417019,
-0.83789682, 0.02995787, 0.16843079, -0.53874511, 0.03056994,
0.17877036, 0.49632853, -1.03276777, -0.74778616, -0.03971953,
0.10907949, -0.67385727, -0.9523471, -0.56550741, 0.40409449,
-0.2703723, -0.10175014, 0.13605487, -0.06306008, -0.01768126,
-0.4749442, -0.56964815, 0.39389887, -0.19248079, -0.04161081,
-0.38728487, -0.20341556, -0.12656988, -0.35949609, -0.46137866,
0.28798422, -0.06603147, -0.04363992, -0.60343552, -0.23565227,
-0.10242701, -0.06792886, 0.09689897, 0.33259571, -0.98854214,
-0.84444433, 0.00673901, 0.13457057, -0.43145794, -0.51500046,
-0.50821936, 0.38000089, 0.0132636, 0.0580942, -0.40157595,
-0.11967677, 0.02549113, -0.10350953, 0.22918226, 0.40411913,
-1.05619383, -0.71218503, -0.02197581, 0.26422262, -0.34765676,
0.06601537, 0.21712676, 0.34723559, -1.20982027, -0.95646334,
0.00793948, 0.27620381, -0.43475035, -0.67326003, -0.6137197,
0.43724492, -0.17666136, -0.06591748, -0.18937394, -0.07400128,
-0.06881691, -0.5201112, -0.61088628, 0.4225319, -0.18969463,
-0.06921366, -0.33993208, -0.06990873, -0.10288513, -0.70659858,
-0.56003648, 0.46628812, -0.16090363, -0.0185108, -0.1431348,
-0.1128775, -0.0078648, -0.02323332, 0.04292452, 0.39291084,
-0.94897962, -0.63863206, -0.16546988, 0.23698957, -0.30633628};
raft::handle_t h;
hipStream_t stream = h.get_stream();
auto d_alloc = h.get_device_allocator();
float* d_X = (float*)d_alloc->allocate(X.size() * sizeof(float), stream);
float* d_X_embedded =
(float*)d_alloc->allocate(X_embedded.size() * sizeof(float), stream);
raft::update_device(d_X, X.data(), X.size(), stream);
raft::update_device(d_X_embedded, X_embedded.data(), X_embedded.size(),
stream);
// euclidean test
score =
trustworthiness_score<float,
raft::distance::DistanceType::L2SqrtUnexpanded>(
h, d_X, d_X_embedded, 50, 30, 8, 5);
d_alloc->deallocate(d_X, X.size() * sizeof(float), stream);
d_alloc->deallocate(d_X_embedded, X_embedded.size() * sizeof(float),
stream);
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
double score;
};
typedef TrustworthinessScoreTest TrustworthinessScoreTestF;
TEST_F(TrustworthinessScoreTestF, Result) {
ASSERT_TRUE(0.9374 < score && score < 0.9376);
}
| 9bfeec88e341df7e10fe5c18adf8bce50005c5d4.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <metrics/trustworthiness.cuh>
#include <raft/cuda_utils.cuh>
#include <vector>
using namespace MLCommon;
using namespace ML::Metrics;
class TrustworthinessScoreTest : public ::testing::Test {
protected:
void basicTest() {
std::vector<float> X = {
5.6142087, 8.59787, -4.382763, -3.6452143, -5.8816037,
-0.6330313, 4.6920023, -0.79210913, 0.6106314, 2.1210914,
5.919943, -8.43784, -6.4819884, 0.41001374, -6.1052523,
-4.0825715, -5.314755, -2.834671, 5.751696, -6.5012555,
-0.4719201, -7.53353, 7.6789393, -1.4959852, -5.5977287,
-9.564147, 1.2902534, 3.559834, -6.7659483, 8.265964,
4.595404, 9.133477, -6.1553917, -6.319754, -2.9039452,
4.4150834, -3.094395, -4.426273, 9.584571, -5.64133,
6.6209483, 7.4044604, 3.9620576, 5.639907, 10.33007,
-0.8792053, 5.143776, -7.464049, 1.2448754, -5.6300974,
5.4518576, 4.119535, 6.749645, 7.627064, -7.2298336,
1.9681473, -6.9083176, 6.404673, 0.07186685, 9.0994835,
8.51037, -8.986389, 0.40534487, 2.115397, 4.086756,
1.2284287, -2.6272132, 0.06527536, -9.587425, -7.206078,
7.864875, 7.4397306, -6.9233336, -2.6643622, 3.3466153,
7.0408177, -3.6069896, -9.971769, 4.4075623, 7.9063697,
2.559074, 4.323717, 1.6867131, -1.1576937, -9.893141,
-3.251416, -7.4889135, -4.0588717, -2.73338, -7.4852257,
3.4460473, 9.759119, -5.4680476, -4.722435, -8.032619,
-1.4598992, 4.227361, 3.135568, 1.1950601, 1.1982028,
6.998856, -6.131138, -6.6921015, 0.5361224, -7.1213965,
-5.6104236, -7.2212887, -2.2710054, 8.544764, -6.0254574,
1.4582269, -5.5587835, 8.031556, -0.26328218, -5.2591386,
-9.262641, 2.8691363, 5.299787, -9.209455, 8.523085,
5.180329, 10.655528, -5.7171874, -6.7739563, -3.6306462,
4.067106, -1.5912259, -3.2345476, 8.042973, -3.6364832,
4.1242137, 9.886953, 5.4743724, 6.3058076, 9.369645,
-0.5175337, 4.9859877, -7.879498, 1.358422, -4.147944,
3.8984218, 5.894656, 6.4903927, 8.702036, -8.023722,
2.802145, -7.748032, 5.8461113, -0.34215945, 11.298865,
1.4107164, -9.949621, -1.6257563, -10.655836, 2.4528909,
1.1570255, 5.170669, 2.8398793, 7.1838694, 9.088459,
2.631155, 3.964414, 2.8769252, 0.04198391, -0.16993195,
3.6747139, -2.8377378, 6.1782537, 10.759618, -4.5642614,
-8.522967, 0.8614642, 6.623416, -1.029324, 5.5488334,
-7.804511, 2.128833, 7.9042315, 7.789576, -2.7944536,
0.72271067, -10.511495, -0.78634536, -10.661714, 2.9376361,
1.9148129, 6.22859, 0.26264945, 8.028384, 6.8743043,
0.9351067, 7.0690722, 4.2846055, 1.4134506, -0.18144785,
5.2778087, -1.7140163, 9.217541, 8.602799, -2.6537218,
-7.8377395, 1.1244944, 5.4540544, -0.38506773, 3.9885726,
-10.76455, 1.4440702, 9.136163, 6.664117, -5.7046547,
8.038592, -9.229767, -0.2799413, 3.6064725, 4.187257,
1.0516582, -2.0707326, -0.7615968, -8.561018, -3.7831352,
10.300297, 5.332594, -6.5880876, -4.2508664, 1.7985519,
5.7226253, -4.1223383, -9.6697855, 1.4885283, 7.524974,
1.7206005, 4.890457, 3.7264557, 0.4428284, -9.922455,
-4.250455, -6.4410596, -2.107994, -1.4109765, -6.1325397,
0.32883006, 6.0489736, 7.7257385, -8.281174, 1.0129383,
-10.792166, 8.378851, 10.802716, 9.848448, -9.188757,
1.3151443, 1.9971865, -2.521849, 4.3268294, -7.775683,
-2.2902298, 3.0824065, -7.17559, 9.6100855, 7.3965735,
-10.476525, 5.895973, -3.6974669, -7.6688933, 1.7354839,
-7.4045196, -1.7992063, -4.0394845, 5.2471714, -2.250571,
2.528036, -8.343515, -2.2374575, -10.019771, 0.73371273,
3.1853926, 2.7994921, 2.6637669, 7.620401, 7.515571,
0.68636256, 5.834537, 4.650282, -1.0362619, 0.4461701,
3.7870514, -4.1340904, 7.202998, 9.736904, -3.005512,
-8.920467, 1.1228397, 6.2598724, 1.2812365, 4.5442104,
-8.791537, 0.92113096, 8.464749, 8.359035, -4.3923397,
1.2252625, -10.1986475, -1.4409319, -10.013967, 3.9071581,
1.683064, 4.877419, 1.6570637, 9.559105, 7.3546534,
0.36635467, 5.220211, 4.6303267, 0.6601065, 0.16149978,
3.8818731, -3.4438233, 8.42085, 8.659159, -3.0935583,
-8.039611, 2.3060374, 5.134666, 1.0458113, 6.0190983,
-9.143728, 0.99048865, 9.210842, 6.670241, -5.9614363,
0.8747396, 7.078824, 8.067469, -10.314754, 0.45977542,
-9.28306, 9.1838665, 9.318644, 7.189082, -11.092555,
1.0320464, 3.882163, 0.10953151, 7.9029684, -6.9068265,
-1.3526366, 5.3996363, -8.430931, 11.452577, 6.39663,
-11.090514, 4.6662245, -3.1268113, -8.357452, 2.2276728,
-10.357126, -0.9291848, -3.4193344, 3.1289792, -2.5030103,
6.772719, 11.457757, -4.2125936, -6.684548, -4.7611327,
3.6960156, -2.3030636, -3.0591488, 10.452471, -4.1267314,
5.66614, 7.501461, 5.072407, 6.636537, 8.990381,
-0.2559256, 4.737867, -6.2149944, 2.535682, -5.5484023,
5.7113924, 3.4742818, 7.9915137, 7.0052586, -7.156467,
1.4354781, -8.286235, 5.7523417, -2.4175215, 9.678009,
0.05066403, -9.645226, -2.2658763, -9.518178, 4.493372,
2.3232365, 2.1659086, 0.42507997, 8.360246, 8.23535,
2.6878164, 5.236947, 3.4924245, -0.6089895, 0.8884741,
4.359464, -4.6073823, 7.83441, 8.958755, -3.4690795,
-9.182282, 1.2478025, 5.6311107, -1.2408862, 3.6316886,
-8.684654, 2.1078515, 7.2813864, 7.9265943, -3.6135032,
0.4571511, 8.493568, 10.496853, -7.432897, 0.8625995,
-9.607528, 7.2899456, 8.83158, 8.908199, -10.300263,
1.1451302, 3.7871468, -0.97040755, 5.7664757, -8.9688,
-2.146672, 5.9641485, -6.2908535, 10.126465, 6.1553903,
-12.066902, 6.301596, -5.0419583, -8.228695, 2.4879954,
-8.918582, -3.7434099, -4.1593685, 3.7431836, -1.1704745,
0.5524103, 9.109399, 9.571567, -11.209955, 1.2462777,
-9.554555, 9.091726, 11.477966, 7.630937, -10.450911,
1.9205878, 5.358983, -0.44546837, 6.7611346, -9.74753,
-0.5939732, 3.8892255, -6.437991, 10.294727, 5.6723895,
-10.7883, 6.192348, -5.293862, -10.811491, 1.0194173,
-7.074576, -3.192368, -2.5231771, 4.2791643, -0.53309685,
0.501366, 9.636625, 7.710316, -6.4219728, 1.0975566,
-8.218886, 6.9011984, 9.873679, 8.903804, -9.316832,
1.2404599, 4.9039655, 1.2272617, 4.541515, -5.2753224,
-3.2196746, 3.1303136, -7.285681, 9.041425, 5.6417427,
-9.93667, 5.7548947, -5.113397, -8.544622, 4.182665,
-7.7709813, -3.2810235, -3.312072, 3.8900535, -2.0604856,
6.709082, -8.461194, 1.2666026, 4.8770437, 2.6955879,
3.0340345, -1.1614609, -3.536341, -7.090382, -5.36146,
9.072544, 6.4554095, -4.4728956, -1.88395, 3.1095037,
8.782348, -3.316743, -8.65248, 1.6802986, 8.186188,
2.1783829, 4.931278, 4.158475, 1.4033595, -11.320101,
-3.7084908, -6.740436, -2.5555193, -1.0451177, -6.5569925,
0.82810307, 8.505919, 8.332857, -9.488569, -0.21588463,
-8.056692, 8.493993, 7.6401625, 8.812983, -9.377281,
2.4369764, 3.1766508, 0.6300803, 5.6666765, -7.913654,
-0.42301777, 4.506412, -7.8954244, 10.904591, 5.042256,
-9.626183, 8.347351, -3.605006, -7.923387, 1.1024277,
-8.705793, -2.5151258, -2.5066147, 4.0515003, -2.060757,
6.2635093, 8.286584, -6.0509276, -6.76452, -3.1158175,
1.6578803, -1.4608748, -1.24211, 8.151246, -4.2970877,
6.093071, 7.4911637, 4.51018, 4.8425875, 9.211085,
-2.4386222, 4.5830803, -5.6079445, 2.3713675, -4.0707507,
3.1787417, 5.462342, 6.915912, 6.3928423, -7.2970796,
5.0112796, -9.140893, 4.9990606, 0.38391754, 7.7088532,
1.9340848, 8.18833, 8.16617, -9.42086, -0.3388326,
-9.659727, 8.243045, 8.099073, 8.439428, -7.038694,
2.1077902, 3.3866816, -1.9975324, 7.4972878, -7.2525196,
-1.553731, 4.08758, -6.6922374, 9.50525, 4.026735,
-9.243538, 7.2740564, -3.9319072, -6.3228955, 1.6693478,
-7.923119, -3.7423058, -2.2813146, 5.3469067, -1.8285407,
3.3118162, 8.826356, -4.4641976, -6.4751124, -9.200089,
-2.519147, 4.225298, 2.4105988, -0.4344186, 0.53441775,
5.2836394, -8.2816105, -4.996147, -1.6870759, -7.8543897,
-3.9788852, -7.0346904, -3.1289773, 7.4567637, -5.6227813,
1.0709786, -8.866012, 8.427324, -1.1755563, -5.789216,
-8.197835, 5.3342214, 6.0646234, -6.8975716, 7.717031,
3.480355, 8.312151, -3.6645212, -3.0976524, -8.090359,
-1.9176173, 2.4257212, 1.9700835, 0.4098958, 2.1341088,
7.652741, -9.9595585, -5.989757, 0.10119354, -7.935407,
-5.792786, -5.22783, -4.318978, 5.414037, -6.4621663,
1.670883, -6.9224787, 8.696932, -2.0214002, -6.6681314,
-8.326418, 4.9049683, 5.4442496, -6.403739, 7.5822453,
7.0972915, -9.072851, -0.23897195, 1.7662339, 5.3096304,
1.983179, -2.222645, -0.34700772, -9.094717, -6.107907,
9.525174, 8.1550665, -5.6940084, -4.1636486, 1.7360662,
8.528821, -3.7299833, -9.341266, 2.608542, 9.108706,
0.7978509, 4.2488184, 2.454484, 0.9446999, -10.106636,
-3.8973773, -6.6566644, -4.5647273, -0.99837756, -6.568582,
9.324853, -7.9020953, 2.0910501, 2.2896829, 1.6790711,
1.3159255, -3.5258796, 1.8898442, -8.105812, -4.924962,
8.771129, 7.1202874, -5.991957, -3.4106019, 2.4450088,
7.796387, -3.055946, -7.8971434, 1.9856719, 9.001636,
1.8511922, 3.019749, 3.1227696, 0.4822102, -10.021213,
-3.530504, -6.225959, -3.0029628, -1.7881511, -7.3879776,
1.3925704, 9.499782, -3.7318087, -3.7074296, -7.7466836,
-1.5284524, 4.0535855, 3.112011, 0.10340207, -0.5429599,
6.67026, -9.155924, -4.924038, 0.64248866, -10.0103655,
-3.2742946, -4.850029, -3.6707063, 8.586258, -5.855605,
4.906918, -6.7813993, 7.9938135, -2.5473144, -5.688948,
-7.822478, 2.1421318, 4.66659, -9.701272, 9.549149,
0.8998125, -8.651497, -0.56899565, -8.639817, 2.3088377,
2.1264515, 3.2764478, 2.341989, 8.594338, 8.630639,
2.8440373, 6.2043204, 4.433932, 0.6320018, -1.8179281,
5.09452, -1.5741565, 8.153934, 8.744339, -3.6945698,
-8.883078, 1.5329908, 5.2745943, 0.44716078, 4.8809066,
-7.9594903, 1.134374, 9.233994, 6.5528665, -4.520542,
9.477355, -8.622195, -0.23191702, 2.0485356, 3.9379985,
1.5916302, -1.4516805, -0.0843819, -7.8554378, -5.88308,
7.999766, 6.2572145, -5.585321, -4.0097756, 0.42382592,
6.160884, -3.631315, -8.333449, 2.770595, 7.8495173,
3.3331623, 4.940415, 3.6207345, -0.037517, -11.034698,
-3.185103, -6.614664, -3.2177854, -2.0792234, -6.8879867,
7.821685, -8.455084, 1.0784642, 4.0033927, 2.7343264,
2.6052725, -4.1224284, -0.89305353, -6.8267674, -4.9715133,
8.880253, 5.6994023, -5.9695024, -4.9181266, 1.3017995,
7.972617, -3.9452884, -10.424556, 2.4504194, 6.21529,
0.93840516, 4.2070026, 6.159839, 0.91979957, -8.706724,
-4.317946, -6.6823545, -3.0388, -2.464262, -7.3716645,
1.3926703, 6.544412, -5.6251183, -5.122411, -8.622049,
-2.3905911, 3.9138813, 1.9779967, -0.05011125, 0.13310997,
7.229751, -9.742043, -8.08724, 1.2426697, -7.9230795,
-3.3162494, -7.129571, -3.5488048, 7.4701195, -5.2357526,
0.5917681, -6.272206, 6.342328, -2.909731, -4.991607,
-8.845513, 3.3228495, 7.033246, -7.8180246, 8.214469,
6.3910093, 9.185153, -6.20472, -7.713809, -3.8481297,
3.5579286, 0.7078448, -3.2893546, 7.384514, -4.448121,
3.0104196, 9.492943, 8.024847, 4.9114385, 9.965594,
-3.014036, 5.182494, -5.8806014, 2.5312455, -5.9926524,
4.474469, 6.3717875, 6.993105, 6.493093, -8.935534,
3.004074, -8.055647, 8.315765, -1.3026813, 8.250377,
0.02606229, 6.8508425, 9.655665, -7.0116496, -0.41060972,
-10.049198, 7.897801, 6.7791023, 8.3362, -9.821014,
2.491157, 3.5160472, -1.6228812, 7.398063, -8.769123,
-3.1743705, 3.2827861, -6.497855, 10.831924, 5.2761307,
-9.704417, 4.3817043, -3.9841619, -8.111647, 1.1883026,
-8.115312, -2.9240117, -5.8879666, 4.20928, -0.3587938,
6.935672, -10.177582, 0.48819053, 3.1250648, 2.9306343,
3.082544, -3.477687, -1.3768549, -7.4922366, -3.756631,
10.039836, 3.6670392, -5.9761434, -4.4728765, 3.244255,
7.027899, -2.3806512, -10.4100685, 1.605716, 7.7953773,
0.5408159, 1.7156523, 3.824097, -1.0604783, -10.142124,
-5.246805, -6.5283823, -4.579547, -2.42714, -6.709197,
2.7782338, 7.33353, -6.454507, -2.9929368, -7.8362985,
-2.695445, 2.4900775, 1.6682367, 0.4641757, -1.0495365,
6.9631333, -9.291356, -8.23837, -0.34263706, -8.275113,
-2.8454232, -5.0864096, -2.681942, 7.5450225, -6.2517986,
0.06810654, -6.470652, 4.9042645, -1.8369255, -6.6937943,
-7.9625087, 2.8510258, 6.180508, -8.282598, 7.919079,
1.4897474, 6.7217417, -4.2459426, -4.114431, -8.375707,
-2.143264, 5.6972933, 1.5574739, 0.39375135, 1.7930849,
5.1737595, -7.826241, -5.160268, -0.80433255, -7.839536,
-5.2620406, -5.4643164, -3.185536, 6.620315, -7.065227,
1.0524757, -6.125088, 5.7126627, -1.6161644, -3.852159,
-9.164279, 2.7005782, 5.946544, -8.468236, 8.2145405,
1.1035942, 6.590157, -4.0461283, -4.8090615, -7.6702685,
-2.1121511, 5.1147075, 1.6128504, 2.0064135, 1.0544407,
6.0038295, -7.8282537, -4.801278, 0.32349443, -8.0649805,
-4.372714, -5.61336, -5.21394, 8.176595, -5.4753284,
1.7800134, -8.267283, 7.2133374, -0.16594432, -6.317046,
-9.490406, 4.1261597, 5.473317, -7.7551675, 7.007468,
7.478628, -8.801905, 0.10975724, 3.5478222, 4.797803,
1.3825226, -3.357369, 0.99262005, -6.94877, -5.4781394,
9.632604, 5.7492557, -5.9014316, -3.1632116, 2.340859,
8.708098, -3.1255999, -8.848661, 4.5612836, 8.455157,
0.73460823, 4.112301, 4.392744, -0.30759293, -6.8036823,
-3.0331545, -8.269506, -2.82415, -0.9411246, -5.993506,
2.1618164, -8.716055, -0.7432543, -10.255819, 3.095418,
2.5131428, 4.752442, 0.9907621, 7.8279433, 7.85814,
0.50430876, 5.2840405, 4.457291, 0.03330028, -0.40692952,
3.9244103, -2.117118, 7.6977615, 8.759009, -4.2157164,
-9.136053, 3.247858, 4.668686, 0.76162136, 5.3833632,
-9.231471, 0.44309422, 8.380872, 6.7211227, -3.091507,
2.173508, -9.038242, -1.3666698, -9.819077, 0.37825826,
2.3898845, 4.2440815, 1.9161536, 7.24787, 6.9124637,
1.6238527, 5.1140285, 3.1935842, 1.02845, -1.1273454,
5.638998, -2.497932, 8.342559, 8.586319, -2.9069402,
-7.6387944, 3.5975037, 4.4115705, 0.41506064, 4.9078383,
-9.68327, 1.8159529, 9.744613, 8.40622, -4.495336,
9.244892, -8.789869, 1.3158468, 4.018167, 3.3922846,
2.652022, -2.7495477, 0.2528986, -8.268324, -6.004913,
10.428784, 6.6580734, -5.537176, -1.7177434, 2.7504628,
6.7735, -2.4454272, -9.998361, 2.9483433, 6.8266654,
2.3787718, 4.472637, 2.5871701, 0.7355365, -7.7027745,
-4.1879907, -7.172832, -4.1843605, -0.03646783, -5.419406,
6.958486, 11.011111, -7.1821184, -7.956423, -3.408451,
4.6850276, -2.348787, -4.398289, 6.9787564, -3.8324208,
5.967827, 8.433518, 4.660108, 5.5657144, 9.964243,
-1.3515275, 6.404833, -6.4805903, 2.4379845, -6.0816774,
1.752272, 5.3771873, 6.9613523, 6.9788294, -6.3894596,
3.7521114, -6.8034263, 6.4458385, -0.7233525, 10.512529,
4.362273, 9.231461, -6.3382263, -7.659, -3.461823,
4.71463, 0.17817476, -3.685746, 7.2962036, -4.6489477,
5.218017, 11.546999, 4.7218375, 6.8498397, 9.281103,
-3.900459, 6.844054, -7.0886965, -0.05019227, -8.233724,
5.5808983, 6.374517, 8.321048, 7.969449, -7.3478637,
1.4917561, -8.003144, 4.780668, -1.1981848, 7.753739,
2.0260844, -8.880096, -3.4258451, -7.141975, 1.9637157,
1.814725, 5.311151, 1.4831505, 7.8483663, 7.257948,
1.395786, 6.417756, 5.376912, 0.59505713, 0.00062552,
3.6634305, -4.159713, 7.3571978, 10.966816, -2.5419605,
-8.466229, 1.904205, 5.6338267, -0.52567476, 5.59736,
-8.361799, 0.5009981, 8.460681, 7.3891273, -3.5272243,
5.0552278, 9.921456, -7.69693, -7.286378, -1.9198836,
3.1666567, -2.5832257, -2.2445817, 9.888111, -5.076563,
5.677401, 7.497946, 5.662994, 5.414262, 8.566503,
-2.5530663, 7.1032815, -6.0612082, 1.3419591, -4.9595256,
4.3377542, 4.3790717, 6.793512, 8.383502, -7.1278043,
3.3240774, -9.379446, 6.838661, -0.81241214, 8.694813,
0.79141915, 7.632467, 8.575382, -8.533798, 0.28954387,
-7.5675836, 5.8653326, 8.97235, 7.1649346, -10.575289,
0.9359381, 5.02381, -0.5609511, 5.543464, -7.69131,
-2.1792977, 2.4729247, -6.1917787, 10.373678, 7.6549597,
-8.809486, 5.5657206, -3.3169382, -8.042887, 2.0874746,
-7.079005, -3.33398, -3.6843317, 4.0172358, -2.0754814,
1.1726758, 7.4618697, 6.9483604, -8.469206, 0.7401797,
-10.318176, 8.384557, 10.5476265, 9.146971, -9.250223,
0.6290606, 4.4941425, -0.7514017, 7.2271705, -8.309598,
-1.4761636, 4.0140634, -6.021102, 9.132852, 5.6610966,
-11.249811, 8.359293, -1.9445792, -7.7393436, -0.3931331,
-8.824441, -2.5995944, -2.5714035, 4.140213, -3.6863053,
5.517265, 9.020411, -4.9286127, -7.871219, -3.7446704,
2.5179656, -1.4543481, -2.2703636, 7.010597, -3.6436229,
6.753862, 7.4129915, 7.1406755, 5.653706, 9.5445175,
0.15698843, 4.761813, -7.698002, 1.6870106, -4.5410123,
4.171763, 5.3747005, 6.341021, 7.456738, -8.231657,
2.763487, -9.208167, 6.676799, -1.1957736, 10.062605,
4.0975976, 7.312957, -2.4981596, -2.9658387, -8.150425,
-2.1075552, 2.64375, 1.6636052, 1.1483809, 0.09276015,
5.8556347, -7.8481026, -5.9913163, -0.02840613, -9.937289,
-1.0486673, -5.2340155, -3.83912, 7.7165728, -8.409944,
0.80863273, -6.9119215, 7.5712357, 0.36031485, -6.056131,
-8.470033, 1.8678337, 3.0121377, -7.3096333, 8.205484,
5.262654, 8.774514, -4.7603083, -7.2096143, -4.437014,
3.6080024, -1.624254, -4.2787876, 8.880863, -4.8984556,
5.1782074, 9.944454, 3.911282, 3.5396595, 8.867042,
-1.2006199, 5.393288, -5.6455317, 0.7829499, -4.0338907,
2.479272, 6.5080743, 8.582535, 7.0097537, -6.9823785,
3.984318, -7.225381, 5.3135114, -1.0391048, 8.951443,
-0.70119005, -8.510742, -0.42949116, -10.9224825, 2.8176029,
1.6800792, 5.778404, 1.7269998, 7.1975236, 7.7258267,
2.7632928, 5.3399253, 3.4650044, 0.01971426, -1.6468811,
4.114996, -1.5110453, 6.8689218, 8.269899, -3.1568048,
-7.0344677, 1.2911975, 5.950357, 0.19028673, 4.657226,
-8.199647, 2.246055, 8.989509, 5.3101015, -4.2400866};
std::vector<float> X_embedded = {
-0.41849962, -0.53906363, 0.46958843, -0.35832694, -0.23779503,
-0.29751351, -0.01072748, -0.21353109, -0.54769957, -0.55086273,
0.37093949, -0.12714292, -0.06639574, -0.36098689, -0.13060696,
-0.07362658, -1.01205945, -0.39285606, 0.2864089, -0.32031146,
-0.19595343, 0.08900568, -0.04813879, -0.06563424, -0.42655188,
-0.69014251, 0.51459783, -0.1942696, -0.07767916, -0.6119386,
0.04813685, -0.22557008, -0.56890118, -0.60293794, 0.43429622,
-0.09240723, -0.00624062, -0.25800395, -0.1886092, 0.01655941,
-0.01961523, -0.14147359, 0.41414487, -0.8512944, -0.61199242,
-0.18586016, 0.14024924, -0.41635606, -0.02890144, 0.1065347,
0.39700791, -1.14060664, -0.95313865, 0.14416681, 0.17306046,
-0.53189689, -0.98987544, -0.67918193, 0.41787854, -0.20878236,
-0.06612862, 0.03502904, -0.03765266, -0.0980606, -0.00971657,
0.29432917, 0.36575687, -1.1645509, -0.89094597, 0.03718805,
0.2310573, -0.38345811, -0.10401925, -0.10653082, 0.38469055,
-0.88302094, -0.80197543, 0.03548668, 0.02775662, -0.54374295,
0.03379983, 0.00923623, 0.29320273, -1.05263519, -0.93360096,
0.03778313, 0.12360487, -0.56437284, 0.0644429, 0.33432651,
0.36450726, -1.22978747, -0.83822101, -0.18796451, 0.34888434,
-0.3801491, -0.45327303, -0.59747899, 0.39697698, -0.15616602,
-0.06159166, -0.40301991, -0.11725303, -0.11913263, -0.12406619,
-0.11227967, 0.43083835, -0.90535849, -0.81646025, 0.10012121,
-0.0141237, -0.63747931, 0.04805023, 0.34190539, 0.50725192,
-1.17861414, -0.74641538, -0.09333111, 0.27992678, -0.56214809,
0.04970971, 0.36249384, 0.57705611, -1.16913795, -0.69849908,
0.10957897, 0.27983218, -0.62088525, 0.0410459, 0.23973398,
0.40960434, -1.14183664, -0.83321381, 0.02149482, 0.21720445,
-0.49869928, -0.95655465, -0.51680422, 0.45761383, -0.08351214,
-0.12151554, 0.00819737, -0.20813803, -0.01055793, 0.25319234,
0.36154974, 0.1822421, -1.15837133, -0.92209691, -0.0501582,
0.08535917, -0.54003763, -1.08675635, -1.04009593, 0.09408128,
0.07009826, -0.01762833, -0.19180447, -0.18029785, -0.20342001,
0.04034991, 0.1814747, 0.36906669, -1.13532007, -0.8852452,
0.0782818, 0.16825101, -0.50301319, -0.29128098, -0.65341312,
0.51484352, -0.38758236, -0.22531103, -0.55021971, 0.10804344,
-0.3521522, -0.38849035, -0.74110794, 0.53761131, -0.25142813,
-0.1118066, -0.47453368, 0.06347904, -0.23796193, -1.02682328,
-0.47594091, 0.39515916, -0.2782529, -0.16566519, 0.08063579,
0.00810116, -0.06213913, -1.059654, -0.62496334, 0.53698546,
-0.11806234, 0.00356161, 0.11513405, -0.14213292, 0.04102662,
-0.36622161, -0.73686272, 0.48323864, -0.27338892, -0.14203401,
-0.41736352, 0.03332564, -0.21907479, -0.06396769, 0.01831361,
0.46263444, -1.01878166, -0.86486858, 0.17622118, -0.01249686,
-0.74530888, -0.9354887, -0.5027945, 0.38170099, -0.15547098,
0.00677824, -0.04677663, -0.13541745, 0.07253501, -0.97933143,
-0.58001202, 0.48235369, -0.18836913, -0.02430783, 0.07572441,
-0.08101331, 0.00630076, -0.16881248, -0.67989182, 0.46083611,
-0.43910736, -0.29321918, -0.38735861, 0.07669903, -0.29749861,
-0.40047669, -0.56722462, 0.33168188, -0.13118173, -0.06672747,
-0.56856316, -0.26269144, -0.14236671, 0.10651901, 0.4962585,
0.38848072, -1.06653547, -0.64079332, -0.47378591, 0.43195483,
-0.04856951, -0.9840439, -0.70610428, 0.34028092, -0.2089237,
-0.05382041, 0.01625874, -0.02080803, -0.12535211, -0.04146428,
-1.24533033, 0.48944879, 0.0578458, 0.26708388, -0.90321028,
0.35377088, -0.36791429, -0.35382384, -0.52748734, 0.42854419,
-0.31744713, -0.19174226, -0.39073724, -0.03258846, -0.19978228,
-0.36185205, -0.57412046, 0.43681973, -0.25414538, -0.12904905,
-0.46334973, -0.03123853, -0.11303604, -0.87073672, -0.45441297,
0.41825858, -0.25303507, -0.21845073, 0.10248682, -0.11045569,
-0.10002795, -0.00572806, 0.16519061, 0.42651513, -1.11417019,
-0.83789682, 0.02995787, 0.16843079, -0.53874511, 0.03056994,
0.17877036, 0.49632853, -1.03276777, -0.74778616, -0.03971953,
0.10907949, -0.67385727, -0.9523471, -0.56550741, 0.40409449,
-0.2703723, -0.10175014, 0.13605487, -0.06306008, -0.01768126,
-0.4749442, -0.56964815, 0.39389887, -0.19248079, -0.04161081,
-0.38728487, -0.20341556, -0.12656988, -0.35949609, -0.46137866,
0.28798422, -0.06603147, -0.04363992, -0.60343552, -0.23565227,
-0.10242701, -0.06792886, 0.09689897, 0.33259571, -0.98854214,
-0.84444433, 0.00673901, 0.13457057, -0.43145794, -0.51500046,
-0.50821936, 0.38000089, 0.0132636, 0.0580942, -0.40157595,
-0.11967677, 0.02549113, -0.10350953, 0.22918226, 0.40411913,
-1.05619383, -0.71218503, -0.02197581, 0.26422262, -0.34765676,
0.06601537, 0.21712676, 0.34723559, -1.20982027, -0.95646334,
0.00793948, 0.27620381, -0.43475035, -0.67326003, -0.6137197,
0.43724492, -0.17666136, -0.06591748, -0.18937394, -0.07400128,
-0.06881691, -0.5201112, -0.61088628, 0.4225319, -0.18969463,
-0.06921366, -0.33993208, -0.06990873, -0.10288513, -0.70659858,
-0.56003648, 0.46628812, -0.16090363, -0.0185108, -0.1431348,
-0.1128775, -0.0078648, -0.02323332, 0.04292452, 0.39291084,
-0.94897962, -0.63863206, -0.16546988, 0.23698957, -0.30633628};
raft::handle_t h;
cudaStream_t stream = h.get_stream();
auto d_alloc = h.get_device_allocator();
float* d_X = (float*)d_alloc->allocate(X.size() * sizeof(float), stream);
float* d_X_embedded =
(float*)d_alloc->allocate(X_embedded.size() * sizeof(float), stream);
raft::update_device(d_X, X.data(), X.size(), stream);
raft::update_device(d_X_embedded, X_embedded.data(), X_embedded.size(),
stream);
// euclidean test
score =
trustworthiness_score<float,
raft::distance::DistanceType::L2SqrtUnexpanded>(
h, d_X, d_X_embedded, 50, 30, 8, 5);
d_alloc->deallocate(d_X, X.size() * sizeof(float), stream);
d_alloc->deallocate(d_X_embedded, X_embedded.size() * sizeof(float),
stream);
}
void SetUp() override { basicTest(); }
void TearDown() override {}
protected:
double score;
};
typedef TrustworthinessScoreTest TrustworthinessScoreTestF;
TEST_F(TrustworthinessScoreTestF, Result) {
ASSERT_TRUE(0.9374 < score && score < 0.9376);
}
|
d7e94e3abc08045718f82158536dd2967049fb3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Fix for gcc 4.7
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#include "grid.cuh"
#include "cudahelper.cuh"
#include "helper_math.h"
#include "parameterspathplanner.cuh"
#include "pathplanner.cuh"
// only for printf debugging
#include <stdlib.h>
#include <stdio.h>
// simulation parameters in constant memory
__constant__ ParametersPathPlanner parametersPathPlanner;
__constant__ Grid growingGrid;
void copyParametersToGpu(ParametersPathPlanner *hostParams)
{
// Copy parameters to constant memory.
cudaSafeCall(hipMemcpyToSymbol(parametersPathPlanner, hostParams, sizeof(ParametersPathPlanner)));
}
__global__
void fillOccupancyGridD(u_int8_t* gridValues, const float4* colliderPos, unsigned int numColliders, unsigned int numCells)
{
uint colliderIndex = getThreadIndex1D();
if(colliderIndex >= numColliders) return;
const float4 particlePosition = colliderPos[colliderIndex];
if(parametersPathPlanner.grid.isPositionInGrid(particlePosition))
{
// get grid-cell of particle
int3 particleGridCell = parametersPathPlanner.grid.getCellCoordinate(make_float3(particlePosition));
// The cell-hash IS the offset in memory, as cells are adressed linearly
int cellHash = parametersPathPlanner.grid.getCellHash(particleGridCell);
if(cellHash >= 0 && cellHash < numCells)
{
gridValues[cellHash] = 255;
}
else
{
printf("ERROR, position was supposed to be in grid! We have %d cells and want to write to cell %d.\n\n\n", numCells, cellHash);
}
}
}
__global__
void dilateOccupancyGridD(u_int8_t* gridValues, unsigned int numCells)
{
// Dilate the occupied cells for additional safety. This also allows expanding routes diagonally
// later-on, as its ok to pass diagonally between occupied cells after dilation
uint cellIndex = getThreadIndex1D();
if(cellIndex >= numCells) return;
u_int8_t ownValue = gridValues[cellIndex];
if(ownValue == 255) return;
int3 threadGridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(cellIndex);
for(int z=-1;z<=1;z++)
{
for(int y=-1;y<=1;y++)
{
for(int x=-1;x<=1;x++)
{
const int3 neighbourGridCellCoordinate = threadGridCellCoordinate + make_int3(x,y,z);
//if(cellIndex == 0) printf("cellIndex 0, coord 0/0/0 neighbor %d/%d/%d\n", x, y, z);
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getSafeCellHash(neighbourGridCellCoordinate);
// Because CUDA works using thread-batches, we cannot just load all cells, then compute and then store all cells.
// Using batches would mean we would dilate a part of the grid, then load the neighboring part and dilate the
// dilation again, making almost all of the grid become occupied.
// For this reason, we say that 255 is occupied and 254 is dilated-occupied. This way, we don't need two grids. Hah!
if(gridValues[neighbourGridCellIndex] == 255)
{
gridValues[cellIndex] = 254;
return;
}
}
}
}
}
}
// Single-thread kernel (launched <<<1,1>>> by the host wrapper): clears the
// occupancy value of the grid cell containing the vehicle and of the two cells
// directly above it, so the planner does not treat the vehicle itself (or its
// immediate climb-out space) as an obstacle.
__global__ void clearOccupancyGridAboveVehiclePositionD(
unsigned char* gridValues,
float vehicleX,
float vehicleY,
float vehicleZ)
{
float3 vehiclePos = make_float3(vehicleX, vehicleY, vehicleZ);
int3 gridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(vehiclePos);
// for(int z=-1;z<=1;z++)
// {
// for(int x=-1;x<=1;x++)
// {
// y = 0..2: the vehicle's own cell plus the two cells straight above it.
for(int y=0;y<=2;y++)
{
// We want to clear only the vehicle's cell 2 cells above it.
// if(y == 0 && (x != 0 || z != 0)) continue;
int3 neighbourGridCellCoordinate = gridCellCoordinate + make_int3(0,y,0);
// The vehicle may sit near the grid's top border; skip out-of-grid cells.
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getCellHash(neighbourGridCellCoordinate);
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourGridCellCoordinate);
printf("clearOccupancyGridAboveVehiclePositionD(): clearing cell centered at %.2f / %.2f / %.2f\n", cellCenter.x, cellCenter.y, cellCenter.z);
gridValues[neighbourGridCellIndex] = 0;
}
}
// }
// }
}
// Host wrapper: launches clearOccupancyGridAboveVehiclePositionD on @stream.
// The kernel clears at most three cells, so a single thread (<<<1,1>>>) suffices.
void clearOccupancyGridAboveVehiclePosition(
unsigned char* gridValues,
float vehicleX,
float vehicleY,
float vehicleZ,
hipStream_t *stream)
{
hipLaunchKernelGGL(( clearOccupancyGridAboveVehiclePositionD), dim3(1), dim3(1), 0, *stream, gridValues, vehicleX, vehicleY, vehicleZ);
cudaCheckSuccess("clearOccupancyGridAboveVehiclePosition");
}
// If startSearchNumberOfCellsAbove is 0, we will start searching for free cells in the waypoint's cell and then further upwards and outwards
// If startSearchNumberOfCellsAbove is 3, we will start searching for free cells 3 cells above the waypoint's cell and then further upwards and outwards
// Kernel: one thread per waypoint. Tries to relocate each waypoint into a free
// occupancy cell (value 0), searching upwards starting
// startSearchNumberOfCellsAbove cells above the waypoint's cell, and sideways
// in x/z in the order {0, -1, +1}. On success the waypoint is replaced by the
// free cell's center (keeping its w-component); waypoints outside the grid or
// without any free neighbor are zeroed out entirely (all components 0).
__global__ void moveWayPointsToSafetyD(unsigned char* gridValues, float4* deviceWaypoints, unsigned int numberOfWayPoints, unsigned int startSearchNumberOfCellsAbove)
{
uint wptIndex = getThreadIndex1D();
// Guard against surplus threads in the last block.
if(wptIndex >= numberOfWayPoints) return;
float4 waypoint = deviceWaypoints[wptIndex];
if(!parametersPathPlanner.grid.isPositionInGrid(waypoint))
{
printf("error, waypoint %d is not even in the grid!\n", wptIndex);
// All-zero waypoint marks "unusable" for the caller.
deviceWaypoints[wptIndex] = make_float4(0.0);
return;
}
int3 gridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(make_float3(waypoint));
if(!parametersPathPlanner.grid.isCellInGrid(gridCellCoordinate))
{
// Position was in the grid but its cell coordinate is not - should be impossible.
printf("moveWayPointsToSafetyD: error, this doesn't make sense at all!");
return;
}
// Horizontal search order: stay in column first, then look left, then right.
int searchOrderHorizontal[3];
searchOrderHorizontal[0] = 0;
searchOrderHorizontal[1] = -1;
searchOrderHorizontal[2] = +1;
bool freeCellFound = false;
if(wptIndex == 0) printf("cell height is %.2f meters\n", parametersPathPlanner.grid.getCellSize().y);
// With a scanner range of 15m, how many cells should we search upwards of the waypoint candidate?
unsigned int maxNumberOfGridCellsToGoUp = 15.0 / parametersPathPlanner.grid.getCellSize().y;
printf("waypoint %d at %.2f/%.2f/%.2f will search up to %d cells up.\n", wptIndex, waypoint.x, waypoint.y, waypoint.z, maxNumberOfGridCellsToGoUp);
for(int z=0;z<3 && !freeCellFound;z++)
{
for(int x=0;x<3 && !freeCellFound;x++)
{
// int y is compared against the unsigned bound; safe here since y starts >= 0.
for(int y=startSearchNumberOfCellsAbove;y<maxNumberOfGridCellsToGoUp && !freeCellFound;y++)
{
const int3 neighbourGridCellCoordinate = gridCellCoordinate + make_int3(searchOrderHorizontal[x],y,searchOrderHorizontal[z]);
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getSafeCellHash(neighbourGridCellCoordinate);
if(gridValues[neighbourGridCellIndex] == 0)
{
freeCellFound = true;
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourGridCellCoordinate);
// Keep the original w-component (e.g. information gain) when relocating.
deviceWaypoints[wptIndex] = make_float4(cellCenter, waypoint.w);
printf("waypoint %d found free neighbor at %.2f/%.2f/%.2f.\n", wptIndex, cellCenter.x, cellCenter.y, cellCenter.z);
}
}
}
}
}
// The waypoint is unusable, remove it!
if(!freeCellFound)
{
printf("waypoint %d found no free neighbor.\n", wptIndex);
deviceWaypoints[wptIndex] = make_float4(0.0);
}
}
// Host-side launcher for moveWayPointsToSafetyD: one thread per waypoint,
// 64 threads per block, launched asynchronously on @stream.
// Waypoints that could be moved into free cells of @gridOccupancy keep their
// (non-zero) w-component; waypoints for which no free neighboring cell exists
// come back with a w-component of zero.
void moveWayPointsToSafetyGpu(
    unsigned char* gridOccupancy,
    float* mDeviceWaypoints,
    unsigned int numberOfWayPoints,
    unsigned int startSearchNumberOfCellsAbove,
    hipStream_t* stream)
{
    uint threadsPerBlock, blockCount;
    computeExecutionKernelGrid(numberOfWayPoints, 64, blockCount, threadsPerBlock);
    // The waypoint buffer is packed float4 (x/y/z/w), hence the cast.
    hipLaunchKernelGGL(( moveWayPointsToSafetyD), dim3(blockCount), dim3(threadsPerBlock), 0, *stream, gridOccupancy, (float4*)mDeviceWaypoints, numberOfWayPoints, startSearchNumberOfCellsAbove);
    cudaCheckSuccess("moveWayPointsToSafetyGpu");
}
// Host wrapper: zeroes the occupancy grid, then launches fillOccupancyGridD
// (one thread per collider, 64 threads per block) to mark cells containing
// colliders with 255. No-op when numColliders is 0 - note the grid is then
// NOT cleared either.
void fillOccupancyGrid(unsigned char* gridValues, const float* colliderPos, unsigned int numColliders, unsigned int numCells, hipStream_t *stream)
{
if(numColliders == 0) return;
// set all cells to empty
cudaSafeCall(hipMemset(gridValues, 0, numCells * sizeof(unsigned char)));
uint numThreads, numBlocks;
computeExecutionKernelGrid(numColliders, 64, numBlocks, numThreads);
printf("fillOccupancyGrid(): using %d colliders at %p to fill occupancy grid with %d cells at %p.\n", numColliders, colliderPos, numCells, gridValues);
// colliderPos holds packed float4 positions, hence the cast.
hipLaunchKernelGGL(( fillOccupancyGridD), dim3(numBlocks), dim3(numThreads), 0, *stream, gridValues, (float4*)colliderPos, numColliders, numCells);
cudaCheckSuccess("fillOccupancyGrid");
printf("fillOccupancyGrid(): done.\n");
}
// Host-side launcher for dilateOccupancyGridD: dilates occupied cells (255)
// into their free neighbors (marking them 254). One thread per cell,
// 64 threads per block. Does nothing for an empty grid.
void dilateOccupancyGrid(unsigned char* gridValues, unsigned int numCells, hipStream_t *stream)
{
    printf("dilateOccupancyGrid(): dilating %d cells.\n", numCells);
    if(numCells == 0) return;
    uint threadsPerBlock, blockCount;
    computeExecutionKernelGrid(numCells, 64, blockCount, threadsPerBlock);
    // Surface any error still pending from earlier asynchronous work before launching.
    cudaCheckSuccess("dilateOccupancyGridDBefore");
    hipLaunchKernelGGL(( dilateOccupancyGridD), dim3(blockCount), dim3(threadsPerBlock), 0, *stream, gridValues, numCells);
    cudaCheckSuccess("dilateOccupancyGridDAfter");
    printf("dilateOccupancyGrid(): done.\n");
}
// Clamp @value into the inclusive range [min, max].
__device__
int bound(int min, int value, int max)
{
    return value < min ? min : (value > max ? max : value);
}
// Single-thread kernel (launched <<<1,1>>>): writes distance value 1 into the
// grid cell containing the start position, seeding the wavefront expansion
// that growGridD then propagates outwards.
__global__ void markStartCellD(u_int8_t* gridValues)
{
int3 cellCoordinateStart = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.start);
int cellIndexStart = parametersPathPlanner.grid.getSafeCellHash(cellCoordinateStart);
// NOTE(review): this rejects cell hash 0, which would normally be a valid
// (first) cell of the grid. If getSafeCellHash() can return 0 for in-grid
// positions, the check should probably be >= 0 - confirm the error contract
// of getSafeCellHash() before changing.
if(cellIndexStart > 0)
{
printf("markStartCellD(): setting start cell %d to 1\n", cellIndexStart);
gridValues[cellIndexStart] = 1;
}
else
{
printf("markStartCellD(): start cell %.1f/%.1f/%.1f is outside grid!\n", parametersPathPlanner.start.x, parametersPathPlanner.start.y, parametersPathPlanner.start.z);
}
}
// Kernel: one step of wavefront ("grassfire") expansion over the occupancy
// grid. Each thread handles one cell of @subGrid, a window of the full
// (super-)grid that the host grows around the start cell with every launch.
// An empty cell (0) adopts lowestNonZeroNeighbor + 1; cells with 254
// (dilated-occupied) or 255 (occupied) block the wave. Repeated launches thus
// spread distance-to-start values outwards one cell at a time.
__global__
void growGridD(u_int8_t* gridValues, Grid subGrid)
{
uint subGridCellHash = getThreadIndex1D();
// Guard against surplus threads in the last block.
if(subGridCellHash >= subGrid.getCellCount()) return;
// Map this sub-grid cell to its cell in the full planning grid via its world-space center.
float3 subGridCellCenter = subGrid.getCellCenter(subGrid.getCellCoordinate(subGridCellHash));
int3 superGridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(subGridCellCenter);
unsigned int superGridCellHash = parametersPathPlanner.grid.getCellHash(superGridCellCoordinate);
u_int8_t lowestNonNullNeighbor = 254; // thats a dilated cell's value
u_int8_t ownValue = gridValues[superGridCellHash];
if(ownValue == 0)
{
// Check all neighbors for the lowest value d != 0,254,255 and put d++ into our own cell.
for(int z=-1;z<=1;z++)
{
for(int y=-1;y<=1;y++)
{
for(int x=-1;x<=1;x++)
{
// don't look into our own cell for neighboring values!
if(x == 0 && y == 0 && z == 0)
{
//printf("will not check myself for neighbors.\n");
continue;
}
const int3 neighbourGridCellCoordinate = superGridCellCoordinate + make_int3(x,y,z);
// Border-cells might ask for neighbors outside of the grid.
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getCellHash(neighbourGridCellCoordinate);
const u_int8_t neighborValue = gridValues[neighbourGridCellIndex];
// Find the lowest neighbor that is neither 0 nor 254/255
if(neighborValue < lowestNonNullNeighbor && neighborValue != 0)
lowestNonNullNeighbor = neighborValue;
}
else
{
// @subGrid should be clamped to the super grid, so this happens only when checking the non-existing neighbors of border cells
/*printf("bug, neighborgridcellindex is %d, super-coord was %d/%d/%d, neighbor-coord was %d/%d/%d\n",
neighbourGridCellIndex,
superGridCellCoordinate.x,
superGridCellCoordinate.y,
superGridCellCoordinate.z,
neighbourGridCellCoordinate.x,
neighbourGridCellCoordinate.y,
neighbourGridCellCoordinate.z);*/
}
}
}
}
// Write our cell's value. A cell first contains a 0, then the neighborCellValue+1. Once it does
// contain a value, it will never change. We're only interested in replacing the value with lower
// numbers, but since the values spread like a wave, that'll never happen.
if(lowestNonNullNeighbor < 254/* && ownValue == 0*/)
{
/*printf("found value %d in neighbor, setting sub-cell %d / super-cell %d (%d/%d/%d) from %d to %d\n",
lowestNonNullNeighbor,
subGridCellHash,
superGridCellHash,
superGridCellCoordinate.x, superGridCellCoordinate.y, superGridCellCoordinate.z,
ownValue,
lowestNonNullNeighbor + 1);*/
gridValues[superGridCellHash] = lowestNonNullNeighbor + 1;
}
else
{
/*printf("failed to find an interesting neighbor for sub-grid-cell %d, super-grid-cell %d (%3d/%3d/%3d) with value %d\n",
subGridCellHash,
superGridCellHash,
superGridCellCoordinate.x,
superGridCellCoordinate.y,
superGridCellCoordinate.z,
ownValue);*/
}
}
else
{
/*printf("sub-grid-cell %d, super-grid-cell %d (%3d/%3d/%3d) already has value %d\n",
subGridCellHash,
superGridCellHash,
superGridCellCoordinate.x,
superGridCellCoordinate.y,
superGridCellCoordinate.z,
ownValue);*/
}
}
// Host wrapper: seeds the wavefront expansion by marking the start cell with
// distance value 1 (single-thread kernel launch on @stream).
void markStartCell(unsigned char* gridValues, hipStream_t *stream)
{
// set the cell containing "start" to 1!
hipLaunchKernelGGL(( markStartCellD), dim3(1), dim3(1), 0, *stream, gridValues);
cudaCheckSuccess("markStartCellD");
}
// Host driver of the wavefront expansion: repeatedly launches growGridD on a
// window (iterationGrid) that grows by one cell per iteration around the start
// cell. The iteration count is a generous upper bound (diagonal of the grid,
// times two) so the wave can also flow around obstacles.
void growGrid(unsigned char* gridValues, ParametersPathPlanner* parameters, hipStream_t *stream)
{
uint numThreads, numBlocks;
const int3 cellCoordinateStart = parameters->grid.getCellCoordinate(parameters->start);
const unsigned int longestSideCellCount = parameters->grid.getLongestSideCellCount();
// lastCellMin/lastCellMax only feed the disabled early-break below; they are
// otherwise unused (and unread on the first iteration).
int3 thisIterationCellMin, thisIterationCellMax, lastCellMin, lastCellMax;
// Let the wave propagate as long as it might take to go from one corner to the opposing one
const int maxNumberOfSteps = sqrt(pow(longestSideCellCount,2) + pow(longestSideCellCount,2)) * 2;
for(int i=1;i<maxNumberOfSteps;i++)
{
// Window of +-i cells around the start cell, clamped to the grid bounds.
thisIterationCellMin = parameters->grid.clampCellCoordinate(cellCoordinateStart + make_int3(-i, -i, -i));
thisIterationCellMax = parameters->grid.clampCellCoordinate(cellCoordinateStart + make_int3(+i, +i, +i));
/* disable this break, as it prevents paths from going back "inside"
*if(thisIterationCellMin == lastCellMin && thisIterationCellMax == lastCellMax)
{
// cell coordinates haven't changed, so we have grown the whole grid.
printf("growGrid(): stopping after iteration %d, as cellMin/cellMax haven't changed.\n", i);
break;
}*/
lastCellMin = thisIterationCellMin;
lastCellMax = thisIterationCellMax;
// Build the sub-grid covering exactly the clamped window, aligned with the super grid's cells.
Grid iterationGrid;
iterationGrid.cells.x = thisIterationCellMax.x - thisIterationCellMin.x + 1;
iterationGrid.cells.y = thisIterationCellMax.y - thisIterationCellMin.y + 1;
iterationGrid.cells.z = thisIterationCellMax.z - thisIterationCellMin.z + 1;
float3 superGridCellSize = parameters->grid.getCellSize();
iterationGrid.worldMin = parameters->grid.getCellCenter(thisIterationCellMin) - superGridCellSize/2;
iterationGrid.worldMax = parameters->grid.getCellCenter(thisIterationCellMax) + superGridCellSize/2;
// cudaSafeCall(hipMemcpyToSymbol(growingGrid, iterationGrid, sizeof(Grid)));
computeExecutionKernelGrid(iterationGrid.getCellCount(), 64, numBlocks, numThreads);
printf("growGrid(): iteration %d of max %d: growing grid in %d/%d/%d = %d cells.\n",
i, maxNumberOfSteps, iterationGrid.cells.x, iterationGrid.cells.y, iterationGrid.cells.z, iterationGrid.getCellCount());
hipLaunchKernelGGL(( growGridD), dim3(numBlocks), dim3(numThreads), 0, *stream, gridValues, iterationGrid);
cudaCheckSuccess("growGridD");
}
}
// Single-thread kernel: inspects the goal cell of the path-planner grid and
// reports the result via @status: 0 = goal cell is free/usable, 2 = goal cell
// is occupied (255) or dilated-occupied (254) and no replacement was found.
// Value 1 ("goal moved") is reserved for the neighbor search, which is not
// implemented yet (see TODO below). @numCells is currently unused;
// @searchRange bounds the planned neighbor search.
__global__
void checkGoalCellD(unsigned char* gridValues, unsigned int numCells, unsigned int searchRange, unsigned int *status)
{
    int3 goalGridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.goal);
    int goalGridCellOffset = parametersPathPlanner.grid.getSafeCellHash(goalGridCellCoordinate);
    uint valueInGoalCell = gridValues[goalGridCellOffset];
    printf("checkGoalCellD(): value in goal cell at %.2f/%.2f/%.2f is %d.\n",
    parametersPathPlanner.goal.x,
    parametersPathPlanner.goal.y,
    parametersPathPlanner.goal.z,
    valueInGoalCell);
    if(valueInGoalCell < 254)
    {
        // Goal cell is usable as-is. BUGFIX: @status was previously never
        // written, so the host read an indeterminate value - report "free"
        // explicitly (host maps 0 -> GoalCellFree).
        *status = 0;
        return;
    }
    else
    {
        // Cell is occupied or dilated-occupied! Try to find an empty neighbor!
        // With searchRange = 3, create an array {1,-1,2,-2,3,-3}
        float *neighborsSearchOrder = new float[searchRange * 2];
        for(unsigned int i=1;i<=searchRange;i++)
        {
            neighborsSearchOrder[2*i-2] = i;
            // Cast before negating: negating an unsigned would wrap around.
            neighborsSearchOrder[2*i-1] = -(float)i;
        }
        // TODO: the actual neighbor search is not implemented yet.
        //for(...)
        // BUGFIX: memory from new[] must be released with delete[], not delete.
        delete[] neighborsSearchOrder;
        // No free neighbor found (search unimplemented): report "blocked"
        // (host maps anything other than 0/1 -> GoalCellBlocked).
        *status = 2;
    }
}
// This method checks whether the goal cell is occupied. If so, it tries to
// find a free neighboring cell (within @searchRange) that can be used instead.
// Returns GoalCellFree (device status 0), GoalCellMoved (status 1) or
// GoalCellBlocked (anything else, and for an empty grid).
GoalCellStatus checkGoalCell(unsigned char* gridValues, unsigned int numCells, unsigned int searchRange, hipStream_t *stream)
{
    if(numCells == 0) return GoalCellBlocked;
    u_int32_t* statusDevice = 0;
    // BUGFIX: hipMalloc needs the ADDRESS of the pointer. The previous code
    // cast the (null) pointer value itself to void**, so statusDevice was
    // never assigned and the memcpy below read from an invalid address.
    cudaSafeCall(hipMalloc((void**)&statusDevice, sizeof(u_int32_t)));
    hipLaunchKernelGGL(( checkGoalCellD), dim3(1), dim3(1), 0, *stream, gridValues, numCells, searchRange, statusDevice);
    cudaCheckSuccess("checkGoalCell");
    // Default to "blocked" so a failed copy cannot yield a bogus success value.
    u_int32_t statusHost = 2;
    cudaSafeCall(hipMemcpy(&statusHost, statusDevice, sizeof(u_int32_t), hipMemcpyDeviceToHost));
    // BUGFIX: the temporary status word was previously leaked.
    cudaSafeCall(hipFree(statusDevice));
    if(statusHost == 0)
    {
        return GoalCellFree;
    }
    else if(statusHost == 1)
    {
        return GoalCellMoved;
    }
    else
    {
        return GoalCellBlocked;
    }
}
// Single-thread kernel (launched <<<1,1>>>): backtracks the wavefront
// distances written by growGridD from the goal cell to the start cell and
// writes the resulting path into @waypoints.
// Output layout: waypoints[0].x = number of waypoints (as float, +0.1 for safe
// int-cast), waypoints[1] = start position, ..., waypoints[count] = goal.
// Failure flags in waypoints[0]: (0,0,0,0) = no path found,
// (0,0,0,1) = goal cell occupied/dilated.
__global__
void retrievePathD(unsigned char* gridValues, float4* waypoints)
{
int3 gridCellGoalCoordinate = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.goal);
int gridCellGoalHash = parametersPathPlanner.grid.getSafeCellHash(gridCellGoalCoordinate);
int3 gridCellCoordinateStart = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.start);
// The goal cell's wavefront value equals its step-distance from the start cell.
uint valueInGoalCell = gridValues[gridCellGoalHash];
printf("retrievePathD(): value in goal cell at %.2f/%.2f/%.2f is %d.\n",
parametersPathPlanner.goal.x,
parametersPathPlanner.goal.y,
parametersPathPlanner.goal.z,
valueInGoalCell);
if(valueInGoalCell == 0)
{
// Tell the caller we failed to find a valid path by setting the first waypoint to all-zero.
waypoints[0] = make_float4(0.0, 0.0, 0.0, 0.0);
}
else if(valueInGoalCell >= 254)
{
// Tell the caller we failed to find a valid path because of an occupied target cell.
waypoints[0] = make_float4(0.0, 0.0, 0.0, 1.0);
}
else
{
// Use this ONE thread to collect all the waypoints. The first float4 will contain
// the number of waypoints including start and goal. The next float4s will be those
// waypoints. Add 0.1 so we can cast to int without losing something.
waypoints[0] = make_float4(valueInGoalCell + 0.1);
// Set the last waypoint, which equals the goal position
waypoints[valueInGoalCell] = make_float4(parametersPathPlanner.goal);
// Now traverse from goal back to start and save the world positions in waypoints
uint stepsToStartCell = valueInGoalCell;
int3 currentCellCoordinate = gridCellGoalCoordinate;
// Saves the direction/offset that we step to get to the next cell.
int3 lastTravelDirection;
// Walk from the goal cell back towards the start cell, one cell per loop
// iteration, until we reach the cell with value 1 (the start cell itself).
do
{
// We are at cellCoordinate and found a value of distance. Now check all neighbors
// until we find one with a smaller value. That's the path backwards towards the goal.
bool foundNextCellTowardsTarget = false;
// NOTE(review): cudaBound() is defined elsewhere; assumed to clamp its middle
// argument into [-1, 1] like bound() above - confirm.
int3 travelDirectionDirect = make_int3(
cudaBound(-1, gridCellCoordinateStart.x - currentCellCoordinate.x, 1),
cudaBound(-1, gridCellCoordinateStart.y - currentCellCoordinate.y, 1),
cudaBound(-1, gridCellCoordinateStart.z - currentCellCoordinate.z, 1));
if(!foundNextCellTowardsTarget)
{
// Paths found using the three nested loops below often look strange, because we search
// in certain directions first. To prevent this, we first search the cell towards the
// direction of the goal...
int3 neighbourCellCoordinate = currentCellCoordinate + travelDirectionDirect;
if(parametersPathPlanner.grid.isCellInGrid(neighbourCellCoordinate))
{
int neighbourCellIndex = parametersPathPlanner.grid.getCellHash(neighbourCellCoordinate);
u_int8_t neighborValue = gridValues[neighbourCellIndex];
if(neighborValue < stepsToStartCell)
{
// Sometimes, we find a cell that is not smaller by ONE, but by MULTIPLE. I haven't found the bug yet,
// but the underlying grid does contain those hiccups. So when we start at the goal cell with e.g. 15,
// then jump to 14, 13, and then 11, we won't actually fill index 12 of the waypoint array, in effect
// reusing waypoint 12 from a previous search.
if(neighborValue != stepsToStartCell-1)
{
printf("uh-oh, error2, there's currently %d steps to start, but neighbor value is %d!\n", stepsToStartCell, neighborValue);
// print the grid containgin the cells with a neighbor difference greater than 1!
for(int printYDiff = -1;printYDiff<=1;printYDiff++)
{
int printY = printYDiff + currentCellCoordinate.y;
if(travelDirectionDirect.y == 0) printY = currentCellCoordinate.y; // print one y-slice only if the cell-hop-error is within this y-slice
printf("grid at height/y %d:\n", printY);
for(int printZ=0;printZ<parametersPathPlanner.grid.cells.z;printZ++)
{
for(int printX=0;printX<parametersPathPlanner.grid.cells.x;printX++)
{
printf("%03d ", gridValues[parametersPathPlanner.grid.getCellHash(make_int3(printX, printY, printZ))]);
}
printf("\n");
}
printf("\n");
if(travelDirectionDirect.y == 0) break;
}
}
// prepend our current cell's position to the waypoint list.
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourCellCoordinate);
waypoints[neighborValue] = make_float4(cellCenter);
printf("retrievePathD(): found by direct step: from cell %d/%d/%d => %d/%d/%d => %d/%d/%d, index %d now at %.2f/%.2f/%.2f\n",
currentCellCoordinate.x, currentCellCoordinate.y, currentCellCoordinate.z,
travelDirectionDirect.x, travelDirectionDirect.y, travelDirectionDirect.z,
neighbourCellCoordinate.x, neighbourCellCoordinate.y, neighbourCellCoordinate.z,
neighborValue, cellCenter.x, cellCenter.y, cellCenter.z);
// We found a neighbor with a smaller distance. Use it!
currentCellCoordinate = neighbourCellCoordinate;
// Save this step for the next iteration!
lastTravelDirection = travelDirectionDirect;
// Escape those 3 for-loops to continue searching from this next cell.
foundNextCellTowardsTarget = true;
// Update distance to start-position, should be a simple decrement.
stepsToStartCell = neighborValue;
}
}
}
if(!foundNextCellTowardsTarget)
{
// Ok, the direct step didn't work.
// Define search order. First try to repeat the last step. If that fails, at least try to keep the height.
// Wrong: now I think that going as directly as possible is more important than repeating the last step.
// Let's see what the paths look like.
lastTravelDirection = travelDirectionDirect;
// Per axis: try the direct direction first, then the remaining offsets.
int searchOrderX[3];
if(lastTravelDirection.x == 0)
{
searchOrderX[0] = lastTravelDirection.x;
searchOrderX[1] = -1;
searchOrderX[2] = +1;
}
else
{
searchOrderX[0] = lastTravelDirection.x;
searchOrderX[1] = +0;
searchOrderX[2] = -lastTravelDirection.x;
}
int searchOrderY[3];
if(lastTravelDirection.y == 0)
{
searchOrderY[0] = lastTravelDirection.y;
searchOrderY[1] = -1;
searchOrderY[2] = +1;
}
else
{
searchOrderY[0] = lastTravelDirection.y;
searchOrderY[1] = +0;
searchOrderY[2] = -lastTravelDirection.y;
}
int searchOrderZ[3];
if(lastTravelDirection.z == 0)
{
searchOrderZ[0] = lastTravelDirection.z;
searchOrderZ[1] = -1;
searchOrderZ[2] = +1;}
else
{
searchOrderZ[0] = lastTravelDirection.z;
searchOrderZ[1] = +0;
searchOrderZ[2] = -lastTravelDirection.z;
}
// now search the neighbors in the given order.
for(int z=0; z<3 && !foundNextCellTowardsTarget; z++)
{
for(int y=0; y<3 && !foundNextCellTowardsTarget; y++) // check lower paths first
{
for(int x=0; x<3 && !foundNextCellTowardsTarget; x++)
{
int3 cellOffset = make_int3(searchOrderX[x], searchOrderY[y], searchOrderZ[z]);
int3 neighbourCellCoordinate = currentCellCoordinate + cellOffset;
if(parametersPathPlanner.grid.isCellInGrid(neighbourCellCoordinate))
{
int neighbourCellIndex = parametersPathPlanner.grid.getCellHash(neighbourCellCoordinate);
u_int8_t neighborValue = gridValues[neighbourCellIndex];
if(neighborValue < stepsToStartCell)
{
// We have found a neighboring cell with smaller info. Let's go that way!
// Sometimes, we find a cell that is not smaller by ONE, but by MULTIPLE. I haven't found the bug yet,
// but the underlying grid does contain those hiccups. So when we start at the goal cell with e.g. 15,
// then jump to 14, 13, and then 11, we won't actually fill index 12 of the waypoint array, in effect
// reusing waypoint 12 from a previous search.
if(neighborValue != stepsToStartCell-1)
{
printf("uh-oh, error2, there's currently %d steps to start, but neighbor value is %d!\n", stepsToStartCell, neighborValue);
// print the grid containgin the cells with a neighbor difference greater than 1!
for(int printYDiff = -1;printYDiff<=1;printYDiff++)
{
int printY = printYDiff + currentCellCoordinate.y;
if(cellOffset.y == 0) printY = currentCellCoordinate.y; // print one y-slice only if the cell-hop-error is within this y-slice
printf("grid at height/y %d:\n", printY);
for(int printZ=0;printZ<parametersPathPlanner.grid.cells.z;printZ++)
{
for(int printX=0;printX<parametersPathPlanner.grid.cells.x;printX++)
{
printf("%03d ", gridValues[parametersPathPlanner.grid.getCellHash(make_int3(printX, printY, printZ))]);
}
printf("\n");
}
printf("\n");
if(cellOffset.y == 0) break;
}
}
// Append our current cell's position to the waypoint list.
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourCellCoordinate);
if(x+y+z == 0)
{
printf("retrievePathD(): found by repeating last step: from cell %d/%d/%d => %d/%d/%d => %d/%d/%d, index %d now at %.2f/%.2f/%.2f\n",
currentCellCoordinate.x, currentCellCoordinate.y, currentCellCoordinate.z,
cellOffset.x, cellOffset.y, cellOffset.z,
neighbourCellCoordinate.x, neighbourCellCoordinate.y, neighbourCellCoordinate.z,
neighborValue, cellCenter.x, cellCenter.y, cellCenter.z);
}
else
{
printf("retrievePathD(): found by searching all neighbors: from cell %d/%d/%d => %d/%d/%d => %d/%d/%d, index %d now at %.2f/%.2f/%.2f\n",
currentCellCoordinate.x, currentCellCoordinate.y, currentCellCoordinate.z,
cellOffset.x, cellOffset.y, cellOffset.z,
neighbourCellCoordinate.x, neighbourCellCoordinate.y, neighbourCellCoordinate.z,
neighborValue, cellCenter.x, cellCenter.y, cellCenter.z);
}
// We found a neighbor with a smaller distance. Use it!
currentCellCoordinate = neighbourCellCoordinate;
lastTravelDirection = cellOffset;
// The w-component doesn't matter here, so set to zero. Later on, the w-component
// will be set to 1 if it turns out that the waypoint is in a now-occupied cell.
waypoints[neighborValue] = make_float4(cellCenter, 0.0);
// Escape those 3 for-loops to continue searching from this next cell.
foundNextCellTowardsTarget = true;
// Update distance to start-position, should be a simple decrement.
stepsToStartCell = neighborValue;
}
}
}
}
}
}
}
while(stepsToStartCell > 1);
// waypoints[1] was filled above with the cell-center. But we want it to be the start-position, which
// - although contained in the cell - is probably not exactly its center.
printf("retrievePathD(): ending, writing start-pos into index 1: %.2f/%.2f/%.2f\n",
parametersPathPlanner.start.x, parametersPathPlanner.start.y, parametersPathPlanner.start.z);
waypoints[1] = make_float4(parametersPathPlanner.start);
}
}
// Host wrapper: extracts the planned path from the grown occupancy grid using
// a single-thread launch of retrievePathD. @waypoints receives packed float4s;
// element 0 carries the waypoint count or a failure flag (see retrievePathD).
void retrievePath(unsigned char* gridValues, float *waypoints, hipStream_t *stream)
{
hipLaunchKernelGGL(( retrievePathD), dim3(1), dim3(1), 0, *stream, gridValues, (float4*)waypoints);
cudaCheckSuccess("retrievePathD");
}
// Kernel: one thread per waypoint. Overwrites each waypoint's w-component with
// the occupancy value of the grid cell the waypoint lies in, re-using the
// w channel as collision information for the caller.
__global__ void testWayPointCellOccupancyD(unsigned char* gridValues, float4* upcomingWayPoints, unsigned int numberOfWayPoints)
{
uint waypointIndex = getThreadIndex1D();
// Guard against surplus threads in the last block.
if(waypointIndex >= numberOfWayPoints) return;
float4 waypoint = upcomingWayPoints[waypointIndex];
const int3 gridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(make_float3(waypoint.x, waypoint.y, waypoint.z));
if(parametersPathPlanner.grid.isCellInGrid(gridCellCoordinate))
{
// Overwrite the waypoint's information gain, re-using it for collision-detection.
const int gridCellHash = parametersPathPlanner.grid.getCellHash(gridCellCoordinate);
waypoint.w = gridValues[gridCellHash];
upcomingWayPoints[waypointIndex] = waypoint;
}
else
{
// Waypoints come from planning inside the grid, so this should never trigger.
printf("testWayPointCellOccupancyD(): bug, waypoint %d is supposedly at %.2f/%.2f/%.2f/%.2f, which is outside the grid.\n",
waypointIndex, waypoint.x, waypoint.y, waypoint.z, waypoint.w);
}
}
// Host wrapper: launches testWayPointCellOccupancyD with one thread per
// waypoint, 64 threads per block, on @stream.
void testWayPointCellOccupancy(unsigned char* gridValues, float* upcomingWayPoints, unsigned int numberOfWayPoints, hipStream_t *stream)
{
// Start a sufficient number of threads and let the superfluous ones return early.
uint numThreads, numBlocks;
computeExecutionKernelGrid(numberOfWayPoints, 64, numBlocks, numThreads);
hipLaunchKernelGGL(( testWayPointCellOccupancyD), dim3(numBlocks), dim3(numThreads), 0, *stream, gridValues, (float4*)upcomingWayPoints, numberOfWayPoints);
cudaCheckSuccess("testWayPointCellOccupancy");
}
| d7e94e3abc08045718f82158536dd2967049fb3d.cu | // Fix for gcc 4.7
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#include "grid.cuh"
#include "cudahelper.cuh"
#include "helper_math.h"
#include "parameterspathplanner.cuh"
#include "pathplanner.cuh"
// only for printf debugging
#include <stdlib.h>
#include <stdio.h>
// simulation parameters in constant memory
__constant__ ParametersPathPlanner parametersPathPlanner;
__constant__ Grid growingGrid;
// Uploads the path-planner parameters into the GPU's __constant__ memory
// symbol parametersPathPlanner, from which all kernels in this file read.
void copyParametersToGpu(ParametersPathPlanner *hostParams)
{
// Copy parameters to constant memory.
cudaSafeCall(cudaMemcpyToSymbol(parametersPathPlanner, hostParams, sizeof(ParametersPathPlanner)));
}
// Kernel: one thread per collider particle. Marks the grid cell containing
// each collider position as occupied (255). Positions outside the grid are
// silently ignored.
__global__
void fillOccupancyGridD(u_int8_t* gridValues, const float4* colliderPos, unsigned int numColliders, unsigned int numCells)
{
uint colliderIndex = getThreadIndex1D();
// Guard against surplus threads in the last block.
if(colliderIndex >= numColliders) return;
const float4 particlePosition = colliderPos[colliderIndex];
if(parametersPathPlanner.grid.isPositionInGrid(particlePosition))
{
// get grid-cell of particle
int3 particleGridCell = parametersPathPlanner.grid.getCellCoordinate(make_float3(particlePosition));
// The cell-hash IS the offset in memory, as cells are adressed linearly
int cellHash = parametersPathPlanner.grid.getCellHash(particleGridCell);
// Defensive range check: an in-grid position should always hash into [0, numCells).
if(cellHash >= 0 && cellHash < numCells)
{
gridValues[cellHash] = 255;
}
else
{
printf("ERROR, position was supposed to be in grid! We have %d cells and want to write to cell %d.\n\n\n", numCells, cellHash);
}
}
}
// Kernel: morphological dilation of the occupancy grid, one thread per cell.
// Launched by dilateOccupancyGrid() with a 1D execution grid covering numCells.
// A free cell that touches an occupied cell (value 255) anywhere in its
// 26-cell neighborhood marks itself 254 ("dilated-occupied").
__global__
void dilateOccupancyGridD(u_int8_t* gridValues, unsigned int numCells)
{
// Dilate the occupied cells for additional safety. This also allows expanding routes diagonally
// later-on, as its ok to pass diagonally between occupied cells after dilation
uint cellIndex = getThreadIndex1D();
// Guard against surplus threads in the last block.
if(cellIndex >= numCells) return;
u_int8_t ownValue = gridValues[cellIndex];
// A cell that is itself occupied stays 255 and needs no dilation check.
if(ownValue == 255) return;
int3 threadGridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(cellIndex);
// Scan the full 3x3x3 neighborhood (the center can never be 255 here, see above).
for(int z=-1;z<=1;z++)
{
for(int y=-1;y<=1;y++)
{
for(int x=-1;x<=1;x++)
{
const int3 neighbourGridCellCoordinate = threadGridCellCoordinate + make_int3(x,y,z);
//if(cellIndex == 0) printf("cellIndex 0, coord 0/0/0 neighbor %d/%d/%d\n", x, y, z);
// Border cells have neighbors outside the grid; skip those.
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getSafeCellHash(neighbourGridCellCoordinate);
// Because CUDA works using thread-batches, we cannot just load all cells, then compute and then store all cells.
// Using batches would mean we would dilate a part of the grid, then load the neighboring part and dilate the
// dilation again, making almost all of the grid become occupied.
// For this reason, we say that 255 is occupied and 254 is dilated-occupied. This way, we don't need two grids. Hah!
if(gridValues[neighbourGridCellIndex] == 255)
{
gridValues[cellIndex] = 254;
return;
}
}
}
}
}
}
// Single-thread kernel (launched <<<1,1>>> by the host wrapper): clears the
// occupancy value of the grid cell containing the vehicle and of the two cells
// directly above it, so the planner does not treat the vehicle itself (or its
// immediate climb-out space) as an obstacle.
__global__ void clearOccupancyGridAboveVehiclePositionD(
unsigned char* gridValues,
float vehicleX,
float vehicleY,
float vehicleZ)
{
float3 vehiclePos = make_float3(vehicleX, vehicleY, vehicleZ);
int3 gridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(vehiclePos);
// for(int z=-1;z<=1;z++)
// {
// for(int x=-1;x<=1;x++)
// {
// y = 0..2: the vehicle's own cell plus the two cells straight above it.
for(int y=0;y<=2;y++)
{
// We want to clear only the vehicle's cell 2 cells above it.
// if(y == 0 && (x != 0 || z != 0)) continue;
int3 neighbourGridCellCoordinate = gridCellCoordinate + make_int3(0,y,0);
// The vehicle may sit near the grid's top border; skip out-of-grid cells.
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getCellHash(neighbourGridCellCoordinate);
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourGridCellCoordinate);
printf("clearOccupancyGridAboveVehiclePositionD(): clearing cell centered at %.2f / %.2f / %.2f\n", cellCenter.x, cellCenter.y, cellCenter.z);
gridValues[neighbourGridCellIndex] = 0;
}
}
// }
// }
}
// Host wrapper: launches clearOccupancyGridAboveVehiclePositionD on @stream.
// The kernel clears at most three cells, so a single thread (<<<1,1>>>) suffices.
void clearOccupancyGridAboveVehiclePosition(
unsigned char* gridValues,
float vehicleX,
float vehicleY,
float vehicleZ,
cudaStream_t *stream)
{
clearOccupancyGridAboveVehiclePositionD<<< 1, 1, 0, *stream>>>(gridValues, vehicleX, vehicleY, vehicleZ);
cudaCheckSuccess("clearOccupancyGridAboveVehiclePosition");
}
// If startSearchNumberOfCellsAbove is 0, we will start searching for free cells in the waypoint's cell and then further upwards and outwards
// If startSearchNumberOfCellsAbove is 3, we will start searching for free cells 3 cells above the waypoint's cell and then further upwards and outwards
// Kernel: one thread per waypoint. Tries to relocate each waypoint into a free
// occupancy cell (value 0), searching upwards starting
// startSearchNumberOfCellsAbove cells above the waypoint's cell, and sideways
// in x/z in the order {0, -1, +1}. On success the waypoint is replaced by the
// free cell's center (keeping its w-component); waypoints outside the grid or
// without any free neighbor are zeroed out entirely (all components 0).
__global__ void moveWayPointsToSafetyD(unsigned char* gridValues, float4* deviceWaypoints, unsigned int numberOfWayPoints, unsigned int startSearchNumberOfCellsAbove)
{
uint wptIndex = getThreadIndex1D();
// Guard against surplus threads in the last block.
if(wptIndex >= numberOfWayPoints) return;
float4 waypoint = deviceWaypoints[wptIndex];
if(!parametersPathPlanner.grid.isPositionInGrid(waypoint))
{
printf("error, waypoint %d is not even in the grid!\n", wptIndex);
// All-zero waypoint marks "unusable" for the caller.
deviceWaypoints[wptIndex] = make_float4(0.0);
return;
}
int3 gridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(make_float3(waypoint));
if(!parametersPathPlanner.grid.isCellInGrid(gridCellCoordinate))
{
// Position was in the grid but its cell coordinate is not - should be impossible.
printf("moveWayPointsToSafetyD: error, this doesn't make sense at all!");
return;
}
// Horizontal search order: stay in column first, then look left, then right.
int searchOrderHorizontal[3];
searchOrderHorizontal[0] = 0;
searchOrderHorizontal[1] = -1;
searchOrderHorizontal[2] = +1;
bool freeCellFound = false;
if(wptIndex == 0) printf("cell height is %.2f meters\n", parametersPathPlanner.grid.getCellSize().y);
// With a scanner range of 15m, how many cells should we search upwards of the waypoint candidate?
unsigned int maxNumberOfGridCellsToGoUp = 15.0 / parametersPathPlanner.grid.getCellSize().y;
printf("waypoint %d at %.2f/%.2f/%.2f will search up to %d cells up.\n", wptIndex, waypoint.x, waypoint.y, waypoint.z, maxNumberOfGridCellsToGoUp);
for(int z=0;z<3 && !freeCellFound;z++)
{
for(int x=0;x<3 && !freeCellFound;x++)
{
// int y is compared against the unsigned bound; safe here since y starts >= 0.
for(int y=startSearchNumberOfCellsAbove;y<maxNumberOfGridCellsToGoUp && !freeCellFound;y++)
{
const int3 neighbourGridCellCoordinate = gridCellCoordinate + make_int3(searchOrderHorizontal[x],y,searchOrderHorizontal[z]);
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getSafeCellHash(neighbourGridCellCoordinate);
if(gridValues[neighbourGridCellIndex] == 0)
{
freeCellFound = true;
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourGridCellCoordinate);
// Keep the original w-component (e.g. information gain) when relocating.
deviceWaypoints[wptIndex] = make_float4(cellCenter, waypoint.w);
printf("waypoint %d found free neighbor at %.2f/%.2f/%.2f.\n", wptIndex, cellCenter.x, cellCenter.y, cellCenter.z);
}
}
}
}
}
// The waypoint is unusable, remove it!
if(!freeCellFound)
{
printf("waypoint %d found no free neighbor.\n", wptIndex);
deviceWaypoints[wptIndex] = make_float4(0.0);
}
}
// Will move the waypoints to cells that are free in gridOccupancy. If the w-component is untouched (and non-zero),
// it was possible to move them to free zones. Waypoints with w-component of zero could not find a free neighboring cell.
void moveWayPointsToSafetyGpu(
        unsigned char* gridOccupancy,
        float* mDeviceWaypoints,
        unsigned int numberOfWayPoints,
        unsigned int startSearchNumberOfCellsAbove,
        cudaStream_t* stream)
{
    // One thread per waypoint candidate; surplus threads in the last
    // block simply bail out inside the kernel.
    uint blocks, threads;
    computeExecutionKernelGrid(numberOfWayPoints, 64, blocks, threads);

    moveWayPointsToSafetyD<<<blocks, threads, 0, *stream>>>(
                gridOccupancy,
                reinterpret_cast<float4*>(mDeviceWaypoints),
                numberOfWayPoints,
                startSearchNumberOfCellsAbove);

    cudaCheckSuccess("moveWayPointsToSafetyGpu");
}
void fillOccupancyGrid(unsigned char* gridValues, const float* colliderPos, unsigned int numColliders, unsigned int numCells, cudaStream_t *stream)
{
    // Without colliders there is nothing to rasterize into the grid.
    if(numColliders == 0) return;

    // Reset every cell to "empty" before marking occupied cells.
    cudaSafeCall(cudaMemset(gridValues, 0, numCells * sizeof(unsigned char)));

    // One thread per collider.
    uint blocks, threads;
    computeExecutionKernelGrid(numColliders, 64, blocks, threads);

    printf("fillOccupancyGrid(): using %d colliders at %p to fill occupancy grid with %d cells at %p.\n", numColliders, colliderPos, numCells, gridValues);

    fillOccupancyGridD<<<blocks, threads, 0, *stream>>>(
                gridValues,
                reinterpret_cast<float4*>(const_cast<float*>(colliderPos)),
                numColliders,
                numCells);

    cudaCheckSuccess("fillOccupancyGrid");
    printf("fillOccupancyGrid(): done.\n");
}
void dilateOccupancyGrid(unsigned char* gridValues, unsigned int numCells, cudaStream_t *stream)
{
    printf("dilateOccupancyGrid(): dilating %d cells.\n", numCells);
    if(numCells == 0) return;

    // One thread per grid cell.
    uint blocks, threads;
    computeExecutionKernelGrid(numCells, 64, blocks, threads);

    // Check both before and after the launch so a pre-existing sticky
    // error is not attributed to the dilation kernel.
    cudaCheckSuccess("dilateOccupancyGridDBefore");
    dilateOccupancyGridD<<<blocks, threads, 0, *stream>>>(gridValues, numCells);
    cudaCheckSuccess("dilateOccupancyGridDAfter");

    printf("dilateOccupancyGrid(): done.\n");
}
__device__
int bound(int min, int value, int max)
{
    // Clamp value into the closed interval [min, max].
    return (value < min) ? min
                         : ((value > max) ? max : value);
}
// Single-thread kernel: seeds the wavefront by writing value 1 into the
// grid cell that contains the planner's start position.
__global__ void markStartCellD(u_int8_t* gridValues)
{
int3 cellCoordinateStart = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.start);
int cellIndexStart = parametersPathPlanner.grid.getSafeCellHash(cellCoordinateStart);
// NOTE(review): this treats hash 0 as "outside the grid". If
// getSafeCellHash() can legitimately return 0 for the first cell, the
// check should presumably be >= 0 — TODO confirm getSafeCellHash()'s
// out-of-grid return value.
if(cellIndexStart > 0)
{
printf("markStartCellD(): setting start cell %d to 1\n", cellIndexStart);
gridValues[cellIndexStart] = 1;
}
else
{
printf("markStartCellD(): start cell %.1f/%.1f/%.1f is outside grid!\n", parametersPathPlanner.start.x, parametersPathPlanner.start.y, parametersPathPlanner.start.z);
}
}
// Wavefront-expansion kernel: one thread per cell of @subGrid, a window
// into the full planner grid. If this thread's super-grid cell is still 0
// (unvisited) and some neighbor carries a distance value d (i.e. not 0 and
// not the occupied/dilated markers 254/255), the cell is set to d+1.
// Repeated launches over growing windows (see growGrid()) propagate the
// distance wave outwards from the start cell.
__global__
void growGridD(u_int8_t* gridValues, Grid subGrid)
{
uint subGridCellHash = getThreadIndex1D();
if(subGridCellHash >= subGrid.getCellCount()) return;
// Map this sub-grid cell to its coordinate/hash in the full planner grid.
float3 subGridCellCenter = subGrid.getCellCenter(subGrid.getCellCoordinate(subGridCellHash));
int3 superGridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(subGridCellCenter);
unsigned int superGridCellHash = parametersPathPlanner.grid.getCellHash(superGridCellCoordinate);
u_int8_t lowestNonNullNeighbor = 254; // thats a dilated cell's value
u_int8_t ownValue = gridValues[superGridCellHash];
if(ownValue == 0)
{
// Check all neighbors for the lowest value d != 0,254,255 and put d++ into our own cell.
for(int z=-1;z<=1;z++)
{
for(int y=-1;y<=1;y++)
{
for(int x=-1;x<=1;x++)
{
// don't look into our own cell for neighboring values!
if(x == 0 && y == 0 && z == 0)
{
//printf("will not check myself for neighbors.\n");
continue;
}
const int3 neighbourGridCellCoordinate = superGridCellCoordinate + make_int3(x,y,z);
// Border-cells might ask for neighbors outside of the grid.
if(parametersPathPlanner.grid.isCellInGrid(neighbourGridCellCoordinate))
{
const int neighbourGridCellIndex = parametersPathPlanner.grid.getCellHash(neighbourGridCellCoordinate);
const u_int8_t neighborValue = gridValues[neighbourGridCellIndex];
// Find the lowest neighbor that is neither 0 nor 254/255
if(neighborValue < lowestNonNullNeighbor && neighborValue != 0)
lowestNonNullNeighbor = neighborValue;
}
else
{
// @subGrid should be clamped to the super grid, so this happens only when checking the non-existing neighbors of border cells
/*printf("bug, neighborgridcellindex is %d, super-coord was %d/%d/%d, neighbor-coord was %d/%d/%d\n",
neighbourGridCellIndex,
superGridCellCoordinate.x,
superGridCellCoordinate.y,
superGridCellCoordinate.z,
neighbourGridCellCoordinate.x,
neighbourGridCellCoordinate.y,
neighbourGridCellCoordinate.z);*/
}
}
}
}
// Write our cell's value. A cell first contains a 0, then the neighborCellValue+1. Once it does
// contain a value, it will never change. We're only interested in replacing the value with lower
// numbers, but since the values spread like a wave, that'll never happen.
if(lowestNonNullNeighbor < 254/* && ownValue == 0*/)
{
/*printf("found value %d in neighbor, setting sub-cell %d / super-cell %d (%d/%d/%d) from %d to %d\n",
lowestNonNullNeighbor,
subGridCellHash,
superGridCellHash,
superGridCellCoordinate.x, superGridCellCoordinate.y, superGridCellCoordinate.z,
ownValue,
lowestNonNullNeighbor + 1);*/
gridValues[superGridCellHash] = lowestNonNullNeighbor + 1;
}
else
{
/*printf("failed to find an interesting neighbor for sub-grid-cell %d, super-grid-cell %d (%3d/%3d/%3d) with value %d\n",
subGridCellHash,
superGridCellHash,
superGridCellCoordinate.x,
superGridCellCoordinate.y,
superGridCellCoordinate.z,
ownValue);*/
}
}
else
{
/*printf("sub-grid-cell %d, super-grid-cell %d (%3d/%3d/%3d) already has value %d\n",
subGridCellHash,
superGridCellHash,
superGridCellCoordinate.x,
superGridCellCoordinate.y,
superGridCellCoordinate.z,
ownValue);*/
}
}
void markStartCell(unsigned char* gridValues, cudaStream_t *stream)
{
    // A single thread is enough: only the one cell containing the start
    // position gets seeded with the wavefront value 1.
    markStartCellD<<<1, 1, 0, *stream>>>(gridValues);
    cudaCheckSuccess("markStartCellD");
}
// Host-side wavefront driver: repeatedly launches growGridD() on a
// cube-shaped window around the start cell that grows by one cell per
// iteration, until the distance wave could have crossed the whole grid.
void growGrid(unsigned char* gridValues, ParametersPathPlanner* parameters, cudaStream_t *stream)
{
uint numThreads, numBlocks;
const int3 cellCoordinateStart = parameters->grid.getCellCoordinate(parameters->start);
const unsigned int longestSideCellCount = parameters->grid.getLongestSideCellCount();
int3 thisIterationCellMin, thisIterationCellMax, lastCellMin, lastCellMax;
// Let the wave propagate as long as it might take to go from one corner to the opposing one
// (evaluates to 2*sqrt(2)*longestSideCellCount, i.e. ~2.83x the longest side).
const int maxNumberOfSteps = sqrt(pow(longestSideCellCount,2) + pow(longestSideCellCount,2)) * 2;
for(int i=1;i<maxNumberOfSteps;i++)
{
// Window = start cell +/- i in every axis, clamped to the grid.
thisIterationCellMin = parameters->grid.clampCellCoordinate(cellCoordinateStart + make_int3(-i, -i, -i));
thisIterationCellMax = parameters->grid.clampCellCoordinate(cellCoordinateStart + make_int3(+i, +i, +i));
/* disable this break, as it prevents paths from going back "inside"
*if(thisIterationCellMin == lastCellMin && thisIterationCellMax == lastCellMax)
{
// cell coordinates haven't changed, so we have grown the whole grid.
printf("growGrid(): stopping after iteration %d, as cellMin/cellMax haven't changed.\n", i);
break;
}*/
// Only needed if the early-exit check above is ever re-enabled.
lastCellMin = thisIterationCellMin;
lastCellMax = thisIterationCellMax;
// Build a sub-grid covering exactly the current window, aligned to the
// super-grid's cell boundaries.
Grid iterationGrid;
iterationGrid.cells.x = thisIterationCellMax.x - thisIterationCellMin.x + 1;
iterationGrid.cells.y = thisIterationCellMax.y - thisIterationCellMin.y + 1;
iterationGrid.cells.z = thisIterationCellMax.z - thisIterationCellMin.z + 1;
float3 superGridCellSize = parameters->grid.getCellSize();
iterationGrid.worldMin = parameters->grid.getCellCenter(thisIterationCellMin) - superGridCellSize/2;
iterationGrid.worldMax = parameters->grid.getCellCenter(thisIterationCellMax) + superGridCellSize/2;
// cudaSafeCall(cudaMemcpyToSymbol(growingGrid, iterationGrid, sizeof(Grid)));
computeExecutionKernelGrid(iterationGrid.getCellCount(), 64, numBlocks, numThreads);
printf("growGrid(): iteration %d of max %d: growing grid in %d/%d/%d = %d cells.\n",
i, maxNumberOfSteps, iterationGrid.cells.x, iterationGrid.cells.y, iterationGrid.cells.z, iterationGrid.getCellCount());
growGridD<<< numBlocks, numThreads, 0, *stream>>>(gridValues, iterationGrid);
cudaCheckSuccess("growGridD");
}
}
// Single-thread kernel: checks whether the goal cell is traversable.
// Writes the result into *status so the host can map it to GoalCellStatus:
//   0 = goal cell free, 1 = goal moved to a free neighbor (search not yet
//   implemented), 2 = goal cell blocked.
__global__
void checkGoalCellD(unsigned char* gridValues, unsigned int numCells, unsigned int searchRange, unsigned int *status)
{
    int3 goalGridCellCoordinate = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.goal);
    int goalGridCellOffset = parametersPathPlanner.grid.getSafeCellHash(goalGridCellCoordinate);

    uint valueInGoalCell = gridValues[goalGridCellOffset];
    printf("checkGoalCellD(): value in goal cell at %.2f/%.2f/%.2f is %d.\n",
           parametersPathPlanner.goal.x,
           parametersPathPlanner.goal.y,
           parametersPathPlanner.goal.z,
           valueInGoalCell);

    if(valueInGoalCell < 254)
    {
        // Goal cell is not occupied (255) or dilated-occupied (254).
        // BUGFIX: the status word was never written at all, so the host
        // read uninitialized device memory.
        *status = 0;
        return;
    }
    else
    {
        // Cell is occupied or dilated-occupied! Try to find an empty neighbor!
        // With searchRange = 3, create an array {1,-1,2,-2,3,-3}
        float *neighborsSearchOrder = new float[searchRange * 2];
        for(int i=1;i<=(int)searchRange;i++)
        {
            neighborsSearchOrder[2*i-2] = i;
            neighborsSearchOrder[2*i-1] = -i;
        }
        //for(...) - the actual neighbor search is not implemented yet.
        // BUGFIX: memory from new[] must be released with delete[], not delete.
        delete[] neighborsSearchOrder;
        // Until the neighbor search is implemented, report the goal as blocked.
        *status = 2;
    }
}
// This method checks whether the goal cell is occupied. If so, it tries
// to find a free neighboring cell that can be used instead.
GoalCellStatus checkGoalCell(unsigned char* gridValues, unsigned int numCells, unsigned int searchRange, cudaStream_t *stream)
{
    if(numCells == 0) return GoalCellBlocked;

    // Allocate a single status word on the device for the kernel to fill.
    // BUGFIX: the original passed the (NULL) pointer's *value* to
    // cudaMalloc instead of its address, so the allocation never reached
    // statusDevice and the subsequent launch/memcpy used a null pointer.
    u_int32_t* statusDevice = 0;
    cudaSafeCall(cudaMalloc((void**)&statusDevice, sizeof(u_int32_t)));

    checkGoalCellD<<< 1, 1, 0, *stream>>>(gridValues, numCells, searchRange, statusDevice);
    cudaCheckSuccess("checkGoalCell");

    // The blocking memcpy also synchronizes with the kernel above.
    u_int32_t statusHost;
    cudaSafeCall(cudaMemcpy(&statusHost, statusDevice, sizeof(u_int32_t), cudaMemcpyDeviceToHost));

    // BUGFIX: the device status word was leaked on every call.
    cudaSafeCall(cudaFree(statusDevice));

    if(statusHost == 0)
    {
        return GoalCellFree;
    }
    else if(statusHost == 1)
    {
        return GoalCellMoved;
    }
    else
    {
        return GoalCellBlocked;
    }
}
// Single-thread kernel: reads the distance field produced by growGridD()
// and walks it backwards from the goal cell to the start cell, storing the
// resulting path in @waypoints. waypoints[0] encodes the outcome:
//   all-zero   -> no path exists (goal cell value is 0)
//   (0,0,0,1)  -> goal cell is occupied/dilated (value >= 254)
//   otherwise  -> the number of waypoints incl. start and goal, stored as
//                 float with +0.1 added for lossless int-casting.
__global__
void retrievePathD(unsigned char* gridValues, float4* waypoints)
{
int3 gridCellGoalCoordinate = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.goal);
int gridCellGoalHash = parametersPathPlanner.grid.getSafeCellHash(gridCellGoalCoordinate);
int3 gridCellCoordinateStart = parametersPathPlanner.grid.getCellCoordinate(parametersPathPlanner.start);
uint valueInGoalCell = gridValues[gridCellGoalHash];
printf("retrievePathD(): value in goal cell at %.2f/%.2f/%.2f is %d.\n",
parametersPathPlanner.goal.x,
parametersPathPlanner.goal.y,
parametersPathPlanner.goal.z,
valueInGoalCell);
if(valueInGoalCell == 0)
{
// Tell the caller we failed to find a valid path by setting the first waypoint to all-zero.
waypoints[0] = make_float4(0.0, 0.0, 0.0, 0.0);
}
else if(valueInGoalCell >= 254)
{
// Tell the caller we failed to find a valid path because of an occupied target cell.
waypoints[0] = make_float4(0.0, 0.0, 0.0, 1.0);
}
else
{
// Use this ONE thread to collect all the waypoints. The first float4 will contain
// the number of waypoints including start and goal. The next float4s will be those
// waypoints. Add 0.1 so we can cast to int without losing something.
waypoints[0] = make_float4(valueInGoalCell + 0.1);
// Set the last waypoint, which equals the goal position
waypoints[valueInGoalCell] = make_float4(parametersPathPlanner.goal);
// Now traverse from goal back to start and save the world positions in waypoints
uint stepsToStartCell = valueInGoalCell;
int3 currentCellCoordinate = gridCellGoalCoordinate;
// Saves the direction/offset that we step to get to the next cell.
int3 lastTravelDirection;
// Each iteration moves one cell closer to the start (the cell value
// decreases by one per step): first try stepping directly towards the
// start cell; if that neighbor is not smaller, scan all 26 neighbors.
do
{
// We are at cellCoordinate and found a value of distance. Now check all neighbors
// until we find one with a smaller value. That's the path backwards towards the goal.
bool foundNextCellTowardsTarget = false;
int3 travelDirectionDirect = make_int3(
cudaBound(-1, gridCellCoordinateStart.x - currentCellCoordinate.x, 1),
cudaBound(-1, gridCellCoordinateStart.y - currentCellCoordinate.y, 1),
cudaBound(-1, gridCellCoordinateStart.z - currentCellCoordinate.z, 1));
if(!foundNextCellTowardsTarget)
{
// Paths found using the three nested loops below often look strange, because we search
// in certain directions first. To prevent this, we first search the cell towards the
// direction of the goal...
int3 neighbourCellCoordinate = currentCellCoordinate + travelDirectionDirect;
if(parametersPathPlanner.grid.isCellInGrid(neighbourCellCoordinate))
{
int neighbourCellIndex = parametersPathPlanner.grid.getCellHash(neighbourCellCoordinate);
u_int8_t neighborValue = gridValues[neighbourCellIndex];
if(neighborValue < stepsToStartCell)
{
// Sometimes, we find a cell that is not smaller by ONE, but by MULTIPLE. I haven't found the bug yet,
// but the underlying grid does contain those hiccups. So when we start at the goal cell with e.g. 15,
// then jump to 14, 13, and then 11, we won't actually fill index 12 of the waypoint array, in effect
// reusing waypoint 12 from a previous search.
if(neighborValue != stepsToStartCell-1)
{
printf("uh-oh, error2, there's currently %d steps to start, but neighbor value is %d!\n", stepsToStartCell, neighborValue);
// print the grid containgin the cells with a neighbor difference greater than 1!
for(int printYDiff = -1;printYDiff<=1;printYDiff++)
{
int printY = printYDiff + currentCellCoordinate.y;
if(travelDirectionDirect.y == 0) printY = currentCellCoordinate.y; // print one y-slice only if the cell-hop-error is within this y-slice
printf("grid at height/y %d:\n", printY);
for(int printZ=0;printZ<parametersPathPlanner.grid.cells.z;printZ++)
{
for(int printX=0;printX<parametersPathPlanner.grid.cells.x;printX++)
{
printf("%03d ", gridValues[parametersPathPlanner.grid.getCellHash(make_int3(printX, printY, printZ))]);
}
printf("\n");
}
printf("\n");
if(travelDirectionDirect.y == 0) break;
}
}
// prepend our current cell's position to the waypoint list.
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourCellCoordinate);
waypoints[neighborValue] = make_float4(cellCenter);
printf("retrievePathD(): found by direct step: from cell %d/%d/%d => %d/%d/%d => %d/%d/%d, index %d now at %.2f/%.2f/%.2f\n",
currentCellCoordinate.x, currentCellCoordinate.y, currentCellCoordinate.z,
travelDirectionDirect.x, travelDirectionDirect.y, travelDirectionDirect.z,
neighbourCellCoordinate.x, neighbourCellCoordinate.y, neighbourCellCoordinate.z,
neighborValue, cellCenter.x, cellCenter.y, cellCenter.z);
// We found a neighbor with a smaller distance. Use it!
currentCellCoordinate = neighbourCellCoordinate;
// Save this step for the next iteration!
lastTravelDirection = travelDirectionDirect;
// Escape those 3 for-loops to continue searching from this next cell.
foundNextCellTowardsTarget = true;
// Update distance to start-position, should be a simple decrement.
stepsToStartCell = neighborValue;
}
}
}
if(!foundNextCellTowardsTarget)
{
// Ok, the direct step didn't work.
// Define search order. First try to repeat the last step. If that fails, at least try to keep the height.
// Wrong: now I think that going as directly as possible is more important than repeating the last step.
// Let's see what the paths look like.
lastTravelDirection = travelDirectionDirect;
int searchOrderX[3];
if(lastTravelDirection.x == 0)
{
searchOrderX[0] = lastTravelDirection.x;
searchOrderX[1] = -1;
searchOrderX[2] = +1;
}
else
{
searchOrderX[0] = lastTravelDirection.x;
searchOrderX[1] = +0;
searchOrderX[2] = -lastTravelDirection.x;
}
int searchOrderY[3];
if(lastTravelDirection.y == 0)
{
searchOrderY[0] = lastTravelDirection.y;
searchOrderY[1] = -1;
searchOrderY[2] = +1;
}
else
{
searchOrderY[0] = lastTravelDirection.y;
searchOrderY[1] = +0;
searchOrderY[2] = -lastTravelDirection.y;
}
int searchOrderZ[3];
if(lastTravelDirection.z == 0)
{
searchOrderZ[0] = lastTravelDirection.z;
searchOrderZ[1] = -1;
searchOrderZ[2] = +1;}
else
{
searchOrderZ[0] = lastTravelDirection.z;
searchOrderZ[1] = +0;
searchOrderZ[2] = -lastTravelDirection.z;
}
// now search the neighbors in the given order.
for(int z=0; z<3 && !foundNextCellTowardsTarget; z++)
{
for(int y=0; y<3 && !foundNextCellTowardsTarget; y++) // check lower paths first
{
for(int x=0; x<3 && !foundNextCellTowardsTarget; x++)
{
int3 cellOffset = make_int3(searchOrderX[x], searchOrderY[y], searchOrderZ[z]);
int3 neighbourCellCoordinate = currentCellCoordinate + cellOffset;
if(parametersPathPlanner.grid.isCellInGrid(neighbourCellCoordinate))
{
int neighbourCellIndex = parametersPathPlanner.grid.getCellHash(neighbourCellCoordinate);
u_int8_t neighborValue = gridValues[neighbourCellIndex];
if(neighborValue < stepsToStartCell)
{
// We have found a neighboring cell with smaller info. Let's go that way!
// Sometimes, we find a cell that is not smaller by ONE, but by MULTIPLE. I haven't found the bug yet,
// but the underlying grid does contain those hiccups. So when we start at the goal cell with e.g. 15,
// then jump to 14, 13, and then 11, we won't actually fill index 12 of the waypoint array, in effect
// reusing waypoint 12 from a previous search.
if(neighborValue != stepsToStartCell-1)
{
printf("uh-oh, error2, there's currently %d steps to start, but neighbor value is %d!\n", stepsToStartCell, neighborValue);
// print the grid containgin the cells with a neighbor difference greater than 1!
for(int printYDiff = -1;printYDiff<=1;printYDiff++)
{
int printY = printYDiff + currentCellCoordinate.y;
if(cellOffset.y == 0) printY = currentCellCoordinate.y; // print one y-slice only if the cell-hop-error is within this y-slice
printf("grid at height/y %d:\n", printY);
for(int printZ=0;printZ<parametersPathPlanner.grid.cells.z;printZ++)
{
for(int printX=0;printX<parametersPathPlanner.grid.cells.x;printX++)
{
printf("%03d ", gridValues[parametersPathPlanner.grid.getCellHash(make_int3(printX, printY, printZ))]);
}
printf("\n");
}
printf("\n");
if(cellOffset.y == 0) break;
}
}
// Append our current cell's position to the waypoint list.
float3 cellCenter = parametersPathPlanner.grid.getCellCenter(neighbourCellCoordinate);
if(x+y+z == 0)
{
printf("retrievePathD(): found by repeating last step: from cell %d/%d/%d => %d/%d/%d => %d/%d/%d, index %d now at %.2f/%.2f/%.2f\n",
currentCellCoordinate.x, currentCellCoordinate.y, currentCellCoordinate.z,
cellOffset.x, cellOffset.y, cellOffset.z,
neighbourCellCoordinate.x, neighbourCellCoordinate.y, neighbourCellCoordinate.z,
neighborValue, cellCenter.x, cellCenter.y, cellCenter.z);
}
else
{
printf("retrievePathD(): found by searching all neighbors: from cell %d/%d/%d => %d/%d/%d => %d/%d/%d, index %d now at %.2f/%.2f/%.2f\n",
currentCellCoordinate.x, currentCellCoordinate.y, currentCellCoordinate.z,
cellOffset.x, cellOffset.y, cellOffset.z,
neighbourCellCoordinate.x, neighbourCellCoordinate.y, neighbourCellCoordinate.z,
neighborValue, cellCenter.x, cellCenter.y, cellCenter.z);
}
// We found a neighbor with a smaller distance. Use it!
currentCellCoordinate = neighbourCellCoordinate;
lastTravelDirection = cellOffset;
// The w-component doesn't matter here, so set to zero. Later on, the w-component
// will be set to 1 if it turns out that the waypoint is in a now-occupied cell.
waypoints[neighborValue] = make_float4(cellCenter, 0.0);
// Escape those 3 for-loops to continue searching from this next cell.
foundNextCellTowardsTarget = true;
// Update distance to start-position, should be a simple decrement.
stepsToStartCell = neighborValue;
}
}
}
}
}
}
}
while(stepsToStartCell > 1);
// waypoints[1] was filled above with the cell-center. But we want it to be the start-position, which
// - although contained in the cell - is probably not exactly its center.
printf("retrievePathD(): ending, writing start-pos into index 1: %.2f/%.2f/%.2f\n",
parametersPathPlanner.start.x, parametersPathPlanner.start.y, parametersPathPlanner.start.z);
waypoints[1] = make_float4(parametersPathPlanner.start);
}
}
void retrievePath(unsigned char* gridValues, float *waypoints, cudaStream_t *stream)
{
    // Path extraction walks sequentially from goal back to start, so a
    // single-thread launch is used on purpose.
    retrievePathD<<<1, 1, 0, *stream>>>(gridValues, reinterpret_cast<float4*>(waypoints));
    cudaCheckSuccess("retrievePathD");
}
__global__ void testWayPointCellOccupancyD(unsigned char* gridValues, float4* upcomingWayPoints, unsigned int numberOfWayPoints)
{
    // One thread per waypoint.
    const uint idx = getThreadIndex1D();
    if(idx >= numberOfWayPoints) return;

    float4 wpt = upcomingWayPoints[idx];
    const int3 cell = parametersPathPlanner.grid.getCellCoordinate(make_float3(wpt.x, wpt.y, wpt.z));

    if(!parametersPathPlanner.grid.isCellInGrid(cell))
    {
        printf("testWayPointCellOccupancyD(): bug, waypoint %d is supposedly at %.2f/%.2f/%.2f/%.2f, which is outside the grid.\n",
               idx, wpt.x, wpt.y, wpt.z, wpt.w);
        return;
    }

    // Re-use the waypoint's w component (information gain) to report the
    // occupancy value of the grid cell the waypoint lies in.
    const int hash = parametersPathPlanner.grid.getCellHash(cell);
    wpt.w = gridValues[hash];
    upcomingWayPoints[idx] = wpt;
}
void testWayPointCellOccupancy(unsigned char* gridValues, float* upcomingWayPoints, unsigned int numberOfWayPoints, cudaStream_t *stream)
{
    // One thread per waypoint; extra threads in the last block return
    // immediately inside the kernel.
    uint blocks, threads;
    computeExecutionKernelGrid(numberOfWayPoints, 64, blocks, threads);

    testWayPointCellOccupancyD<<<blocks, threads, 0, *stream>>>(
                gridValues,
                reinterpret_cast<float4*>(upcomingWayPoints),
                numberOfWayPoints);

    cudaCheckSuccess("testWayPointCellOccupancy");
}
|
c718d9763a5dfac5bca3b2b1dc5182688b38f481.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Msnhnet/layers/cuda/MsnhActivationsGPU.h"
namespace Msnhnet
{

// ---- Device-side scalar activation functions -------------------------------
// Each operates on a single float; the kernels below apply them per element.

// Sigmoid: 1 / (1 + e^-x).
__device__ float logisticActivateKernel(const float x)
{
    return 1.f/(1.f + expf(-x));
}

// Sigmoid rescaled to the range (-1, 1).
__device__ float loggyActivateKernel(const float x)
{
    return 2.f/(1.f + expf(-x)) - 1.f;
}

// ReLU, written branch-free as x*(x>0).
__device__ float reluActivateKernel(const float x)
{
    return x*(x>0);
}

// ReLU clipped to [0, 6].
__device__ float relu6ActivateKernel(const float x)
{
    return (x>0?x:0)>6?6:(x>0?x:0);
}

// Hard-swish: x * relu6(x + 3) / 6 (0.16666667f approximates 1/6).
__device__ float hardSwishActivateKernel(const float x)
{
    float res = x + 3.f;
    res = (res>0?res:0)>6?6:(res>0?res:0);
    res = x*res*0.16666667f;
    return res;
}

// ELU (alpha = 1): x for x >= 0, e^x - 1 otherwise.
__device__ float eluActivateKernel(const float x)
{
    return ((x >= 0)*x + (x < 0)*(expf(x)-1.f));
}

// SELU with scale 1.0507 and alpha 1.6732.
__device__ float seluActivateKernel(const float x)
{
    return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1);
}

// Leaky ReLU with fixed negative slope 0.01.
__device__ float relieActivateKernel(const float x)
{
    return (x>0) ? x : .01f*x;
}

// Ramp: x for x > 0 plus a 0.1*x leak added everywhere.
__device__ float rampActivateKernel(const float x)
{
    return x*(x>0) + .1f*x;
}

// Leaky ReLU with configurable negative slope (default 0.1).
__device__ float leakyActivateKernel(const float x, const float param = 0.1f)
{
    return (x>0) ? x : param*x;
}

// tanh via exponentials: (e^2x - 1) / (e^2x + 1).
__device__ float tanhActivateKernel(const float x)
{
    return ((expf(2*x)-1)/(expf(2*x)+1));
}

// Staircase activation built from floor(x/2) segments, alternating flat
// and unit-slope pieces.
__device__ float stairActivateKernel(const float x)
{
    int n = static_cast<int>(floor(x));
    if (n%2 == 0)
    {
        return (floorf(x/2.f));
    }
    else
    {
        return static_cast<float>((x - n) + floorf(x/2.f));
    }
}

// Hard tanh: clamps x into [-1, 1].
__device__ float hardtanActivateKernel(const float x)
{
    if (x < -1)
    {
        return -1;
    }
    if (x > 1)
    {
        return 1;
    }
    return x;
}

// Softplus ln(1 + e^x), with linear (x > threshold) and exponential
// (x < -threshold) shortcuts for numerical stability.
__device__ float softplusActivateKernel(const float x, const float threshold)
{
    if (x > threshold)
    {
        return x;
    }
    else if (x < -threshold)
    {
        return expf(x);
    }
    return logf(expf(x) + 1);
}

// Piecewise-linear sigmoid approximation.
__device__ float plseActivateKernel(const float x)
{
    if(x < -4)
    {
        return .01f * (x + 4);
    }
    if(x > 4)
    {
        return .01f * (x - 4) + 1;
    }
    return .125f*x + .5f;
}

// Leaky hard tanh: slope 0.001 outside [0, 1], identity inside.
__device__ float lhtanActivateKernel(const float x)
{
    if(x < 0.0f)
    {
        return .001f*x;
    }
    if(x > 1.0f)
    {
        return .001f*(x-1) + 1;
    }
    return x;
}

// Mish: x * tanh(softplus(x)), softplus capped at threshold 20.
__device__ float mishActivateKernel(const float x)
{
    const float mishThreshHold = 20.f;
    return x*tanhf(softplusActivateKernel(x, mishThreshHold));
}

// Swish / SiLU: x * sigmoid(x).
__device__ float swishActivateKernel(const float x)
{
    return x*logisticActivateKernel(x);
}

// Dispatches one scalar activation by type. 'params' carries the per-type
// extra parameter (LEAKY slope, SOFT_PLUS threshold) and is ignored by the
// other activations. Unknown types return 0.
__device__ float activateKernel(const float x, const ActivationType actType, const float params)
{
    switch (actType)
    {
    case LOGISTIC:
        return logisticActivateKernel(x);
    case LOGGY:
        return loggyActivateKernel(x);
    case RELU:
        return reluActivateKernel(x);
    case RELU6:
        return relu6ActivateKernel(x);
    case ELU:
        return eluActivateKernel(x);
    case SELU:
        return seluActivateKernel(x);
    case RELIE:
        return relieActivateKernel(x);
    case RAMP:
        return rampActivateKernel(x);
    case LEAKY:
        return leakyActivateKernel(x, params);
    case TANH:
        return tanhActivateKernel(x);
    case PLSE:
        return plseActivateKernel(x);
    case STAIR:
        return stairActivateKernel(x);
    case HARDTAN:
        return hardtanActivateKernel(x);
    case LHTAN:
        return lhtanActivateKernel(x);
    case SOFT_PLUS:
        return softplusActivateKernel(x, params);
    case MISH:
        return mishActivateKernel(x);
    case SWISH:
        return swishActivateKernel(x);
    case HARD_SWISH:
        return hardSwishActivateKernel(x);
    case NONE:
        return x;
    default:
        return 0;
    }
}

// Element-wise in-place activation of x[0..numX); extra threads do nothing.
__global__ void activateArrayKernel(float *const x, const int numX, const ActivationType actType, const float param)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < numX)
    {
        x[i] = activateKernel(x[i],actType,param);
    }
}

// Applies actType (with parameter param) in place to the numX elements of
// gpuX on the shared Msnhnet stream, then checks for launch errors.
void ActivationsGPU::gpuActivateArray(float *const &gpuX, const int &numX, const ActivationType &actType, const float &param)
{
    hipLaunchKernelGGL(( activateArrayKernel), dim3(Cuda::getGrid(numX)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), gpuX,numX,actType,param);
    CUDA_CHECK(hipPeekAtLastError());
}

// Per-channel parameterized activation: decomposes the flat index into
// (batch b, channel c, plane offset i) and applies actType with the
// channel's parameter weights[c].
__global__ void activateArrayPReluKernel(float *const x, const int numX, const int channels, float *const weights, const int whStep, const ActivationType actType)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(index < numX)
    {
        int i = index % whStep;
        index = index / whStep;
        int c = index % channels;
        index = index / channels;
        int b = index;
        int idx = b*channels*whStep + c*whStep + i;
        x[idx] = activateKernel(x[idx],actType,weights[c]);
    }
}

// Launches the PRelu kernel over batch*channels*whStep elements.
// NOTE(review): the activation type is hard-coded to LEAKY here.
void ActivationsGPU::gpuActivatePRelu(float *const &gpuX, const int &batch, const int &channels, float *const &gpuWeights, const int &whStep)
{
    size_t numX = batch*channels*whStep;
    hipLaunchKernelGGL(( activateArrayPReluKernel), dim3(Cuda::getGrid(numX)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), gpuX, numX, channels, gpuWeights, whStep, ActivationType::LEAKY);
    CUDA_CHECK(hipPeekAtLastError());
}

// Channel-wise positive normalization: for each spatial location, positive
// channel activations are divided by the sum of all positive channel
// activations (plus eps); non-positive activations are written as 0.
// Data layout: x[b][c][wh] with whStep elements per channel plane.
__global__ void activateArrayNormChKernel(float *const gpuX, const int numX, const int batch, const int channels, const int whStep, float *const gpuOutput)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    int whIndex = i%whStep;
    // BUGFIX: the batch index must be derived from the thread index; the
    // original used "int b = whStep", which indexed far outside the data.
    // Assumes one thread per (batch, spatial) pair, i.e. numX == batch*whStep
    // — TODO confirm against the caller.
    int b = i/whStep;
    const float eps = 0.0001f;
    if(i < numX)
    {
        // First pass: accumulate the positive activations across channels.
        float sum = eps;
        for (int c = 0; c < channels; ++c)
        {
            float val = gpuX[whIndex + c*whStep + b*whStep*channels];
            if(val > 0)
            {
                sum += val;
            }
        }
        // Second pass: write normalized positives, zero for the rest.
        for (int c = 0; c < channels; ++c)
        {
            float val = gpuX[whIndex + c*whStep + b*whStep*channels];
            if(val > 0)
            {
                val = val/sum;
            }
            else
            {
                val = 0;
            }
            gpuOutput[whIndex + c*whStep + b*whStep*channels] = val;
        }
    }
}

// Launches the channel-normalization kernel; see kernel docs for layout.
void ActivationsGPU::gpuActivateArrayNormCh(float *const &gpuX, const int &numX, const int &batch, const int &channels, const int &whStep, float *const &gpuOutput)
{
    hipLaunchKernelGGL(( activateArrayNormChKernel), dim3(Cuda::getGrid(numX)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), gpuX,numX,batch,channels,whStep,gpuOutput);
    CUDA_CHECK(hipPeekAtLastError());
}

// Channel-wise softmax (optionally max-shifted for numerical stability).
// BUGFIX: the kernel originally took its arguments as references — a
// __global__ function must receive parameters by value, since references
// would point into host memory.
__global__ void activateArrayNormChSoftMaxKernel(float *const gpuX, const int numX, const int batch, const int channels, const int whStep, float *const gpuOutput, const int useMaxVal)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    int whIndex = i%whStep;
    // BUGFIX: batch index derived from the thread index (was "b = whStep").
    int b = i/whStep;
    const float eps = 0.0001f;
    if(i < numX)
    {
        float sum = eps;
        float maxVal = -FLT_MAX;
        if(useMaxVal)
        {
            // Find the per-location channel maximum for a stable softmax.
            for (int c = 0; c < channels; ++c)
            {
                float val = gpuX[whIndex + c*whStep + b*whStep*channels];
                if(val > maxVal || c == 0)
                {
                    maxVal = val;
                }
            }
        }
        else
        {
            maxVal = 0;
        }
        for (int c = 0; c < channels; ++c)
        {
            float val = gpuX[whIndex + c*whStep + b*whStep*channels];
            sum += expf(val - maxVal);
        }
        for (int c = 0; c < channels; ++c)
        {
            float val = gpuX[whIndex + c*whStep + b*whStep*channels];
            val = expf(val - maxVal)/sum;
            gpuOutput[whIndex + c*whStep + b*whStep*channels] = val;
        }
    }
}

// Launches the channel-softmax kernel; arguments are passed by value to the
// kernel (see BUGFIX note above).
void ActivationsGPU::gpuActivateArrayNormChSoftMax(float *const &gpuX, const int &numX, const int &batch, const int &channels, const int &whStep, float *const &gpuOutput, const int &useMaxVal)
{
    hipLaunchKernelGGL(( activateArrayNormChSoftMaxKernel), dim3(Cuda::getGrid(numX)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), gpuX,numX,batch,channels,whStep,gpuOutput,useMaxVal);
    CUDA_CHECK(hipPeekAtLastError());
}
}
| c718d9763a5dfac5bca3b2b1dc5182688b38f481.cu | #include "Msnhnet/layers/cuda/MsnhActivationsGPU.h"
namespace Msnhnet
{
// Sigmoid: 1 / (1 + e^-x).
__device__ float logisticActivateKernel(const float x)
{
return 1.f/(1.f + expf(-x));
}
// Sigmoid rescaled to the range (-1, 1).
__device__ float loggyActivateKernel(const float x)
{
return 2.f/(1.f + expf(-x)) - 1.f;
}
// ReLU, written branch-free as x*(x>0).
__device__ float reluActivateKernel(const float x)
{
return x*(x>0);
}
// ReLU clipped to [0, 6].
__device__ float relu6ActivateKernel(const float x)
{
return (x>0?x:0)>6?6:(x>0?x:0);
}
// Hard-swish: x * relu6(x + 3) / 6 (0.16666667f approximates 1/6).
__device__ float hardSwishActivateKernel(const float x)
{
float res = x + 3.f;
res = (res>0?res:0)>6?6:(res>0?res:0);
res = x*res*0.16666667f;
return res;
}
// ELU (alpha = 1): x for x >= 0, e^x - 1 otherwise.
__device__ float eluActivateKernel(const float x)
{
return ((x >= 0)*x + (x < 0)*(expf(x)-1.f));
}
// SELU with scale 1.0507 and alpha 1.6732.
__device__ float seluActivateKernel(const float x)
{
return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1);
}
// Leaky ReLU with fixed negative slope 0.01.
__device__ float relieActivateKernel(const float x)
{
return (x>0) ? x : .01f*x;
}
// Ramp: x for x > 0 plus a 0.1*x leak added everywhere.
__device__ float rampActivateKernel(const float x)
{
return x*(x>0) + .1f*x;
}
// Leaky ReLU with configurable negative slope (default 0.1).
__device__ float leakyActivateKernel(const float x, const float param = 0.1f)
{
return (x>0) ? x : param*x;
}
// tanh via exponentials: (e^2x - 1) / (e^2x + 1).
__device__ float tanhActivateKernel(const float x)
{
return ((expf(2*x)-1)/(expf(2*x)+1));
}
// Staircase activation built from floor(x/2) segments, alternating flat
// and unit-slope pieces depending on the parity of floor(x).
__device__ float stairActivateKernel(const float x)
{
int n = static_cast<int>(floor(x));
if (n%2 == 0)
{
return (floorf(x/2.f));
}
else
{
return static_cast<float>((x - n) + floorf(x/2.f));
}
}
// Hard tanh: clamps x into [-1, 1].
__device__ float hardtanActivateKernel(const float x)
{
if (x < -1)
{
return -1;
}
if (x > 1)
{
return 1;
}
return x;
}
// Softplus ln(1 + e^x), with linear (x > threshold) and exponential
// (x < -threshold) shortcuts for numerical stability.
__device__ float softplusActivateKernel(const float x, const float threshold)
{
if (x > threshold)
{
return x;
}
else if (x < -threshold)
{
return expf(x);
}
return logf(expf(x) + 1);
}
// Piecewise-linear sigmoid approximation.
__device__ float plseActivateKernel(const float x)
{
if(x < -4)
{
return .01f * (x + 4);
}
if(x > 4)
{
return .01f * (x - 4) + 1;
}
return .125f*x + .5f;
}
// Leaky hard tanh: slope 0.001 outside [0, 1], identity inside.
__device__ float lhtanActivateKernel(const float x)
{
if(x < 0.0f)
{
return .001f*x;
}
if(x > 1.0f)
{
return .001f*(x-1) + 1;
}
return x;
}
// Mish: x * tanh(softplus(x)), softplus capped at threshold 20.
__device__ float mishActivateKernel(const float x)
{
const float mishThreshHold = 20.f;
return x*tanhf(softplusActivateKernel(x, mishThreshHold));
}
// Swish / SiLU: x * sigmoid(x).
__device__ float swishActivateKernel(const float x)
{
return x*logisticActivateKernel(x);
}
// Dispatches one scalar activation by type. 'params' carries the per-type
// extra parameter (LEAKY slope, SOFT_PLUS threshold) and is ignored by the
// other activations. Unknown types return 0.
__device__ float activateKernel(const float x, const ActivationType actType, const float params)
{
switch (actType)
{
case LOGISTIC:
return logisticActivateKernel(x);
case LOGGY:
return loggyActivateKernel(x);
case RELU:
return reluActivateKernel(x);
case RELU6:
return relu6ActivateKernel(x);
case ELU:
return eluActivateKernel(x);
case SELU:
return seluActivateKernel(x);
case RELIE:
return relieActivateKernel(x);
case RAMP:
return rampActivateKernel(x);
case LEAKY:
return leakyActivateKernel(x, params);
case TANH:
return tanhActivateKernel(x);
case PLSE:
return plseActivateKernel(x);
case STAIR:
return stairActivateKernel(x);
case HARDTAN:
return hardtanActivateKernel(x);
case LHTAN:
return lhtanActivateKernel(x);
case SOFT_PLUS:
return softplusActivateKernel(x, params);
case MISH:
return mishActivateKernel(x);
case SWISH:
return swishActivateKernel(x);
case HARD_SWISH:
return hardSwishActivateKernel(x);
case NONE:
return x;
default:
return 0;
}
}
// Element-wise in-place activation of x[0..numX) using a 2D grid of 1D
// blocks; threads with i >= numX do nothing.
__global__ void activateArrayKernel(float *const x, const int numX, const ActivationType actType, const float param)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < numX)
{
x[i] = activateKernel(x[i],actType,param);
}
}
// Applies `actType` elementwise to numX floats at gpuX on the library CUDA
// stream; `param` feeds the parameterised activations (LEAKY/SOFT_PLUS).
// Fix: the last parameter was the mojibake `const float ¶m` (an HTML-entity
// corruption of `&param`), which did not match the `param` used below and
// could not compile — restored to `const float &param`.
void ActivationsGPU::gpuActivateArray(float *const &gpuX, const int &numX, const ActivationType &actType, const float &param)
{
    activateArrayKernel<<<Cuda::getGrid(numX), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(gpuX,numX,actType,param);
    CUDA_CHECK(cudaPeekAtLastError());   // non-clearing check for launch-config errors
}
// PReLU over a contiguous (batch, channels, whStep) tensor: one thread per
// element, each channel using its own slope weights[c].
// Simplification: the original decomposed the flat index into (b, c, i) and
// recomposed b*channels*whStep + c*whStep + i, which is algebraically the
// same flat index — only the channel component is actually needed.
__global__ void activateArrayPReluKernel(float *const x, const int numX, const int channels, float *const weights, const int whStep, const ActivationType actType)
{
    const int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index < numX)
    {
        const int c = (index / whStep) % channels;   // channel of this element
        x[index] = activateKernel(x[index], actType, weights[c]);
    }
}
// PReLU over a (batch, channels, whStep) tensor: reuses the LEAKY activation
// with a learned per-channel slope taken from gpuWeights.
// NOTE(review): numX is size_t but the kernel's parameter is int — this
// narrows for tensors of 2^31+ elements; confirm sizes stay below that.
void ActivationsGPU::gpuActivatePRelu(float *const &gpuX, const int &batch, const int &channels, float *const &gpuWeights, const int &whStep)
{
    size_t numX = batch*channels*whStep;
    activateArrayPReluKernel<<<Cuda::getGrid(numX), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(gpuX, numX, channels, gpuWeights, whStep, ActivationType::LEAKY);
    CUDA_CHECK(cudaPeekAtLastError());
}
// Channel normalisation: for each spatial position, divide every positive
// channel value by the sum of the positive channel values (negatives -> 0).
// One thread per (batch item, spatial position); assumes numX == batch*whStep
// — TODO confirm against callers (matches the upstream darknet kernel).
// Fix: batch index was `int b = whStep;` — a copy/paste bug that indexed far
// outside the tensor; it must be i / whStep.
__global__ void activateArrayNormChKernel(float *const gpuX, const int numX, const int batch, const int channels, const int whStep, float *const gpuOutput)
{
    const int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    const int whIndex = i % whStep;      // spatial position within the plane
    const int b = i / whStep;            // batch item (was: b = whStep — out of bounds)
    const float eps = 0.0001f;           // guards the division below
    if (i < numX)
    {
        // Sum of positive channel responses at this position.
        float sum = eps;
        for (int c = 0; c < channels; ++c)
        {
            const float val = gpuX[whIndex + c*whStep + b*whStep*channels];
            if (val > 0)
            {
                sum += val;
            }
        }
        // Normalise positives; clamp negatives to zero.
        for (int c = 0; c < channels; ++c)
        {
            float val = gpuX[whIndex + c*whStep + b*whStep*channels];
            val = (val > 0) ? val / sum : 0;
            gpuOutput[whIndex + c*whStep + b*whStep*channels] = val;
        }
    }
}
// Launches the channel-normalisation kernel on the library CUDA stream.
// NOTE(review): numX is presumably batch*whStep (one thread per spatial
// position per batch item) — confirm against callers.
void ActivationsGPU::gpuActivateArrayNormCh(float *const &gpuX, const int &numX, const int &batch, const int &channels, const int &whStep, float *const &gpuOutput)
{
    activateArrayNormChKernel<<<Cuda::getGrid(numX), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(gpuX,numX,batch,channels,whStep,gpuOutput);
    CUDA_CHECK(cudaPeekAtLastError());
}
// Softmax across channels at every spatial position, optionally shifted by
// the channel max so expf cannot overflow. Assumes numX == batch*whStep —
// TODO confirm against callers.
// Fixes: (1) all parameters were references (`const int &` etc.) — __global__
// functions cannot take reference parameters (kernel arguments are copied to
// the device), so they are now by value; call sites are unchanged.
// (2) batch index was `int b = whStep;` — must be i / whStep.
__global__ void activateArrayNormChSoftMaxKernel(float *const gpuX, const int numX, const int batch, const int channels, const int whStep, float *const gpuOutput, const int useMaxVal)
{
    const int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    const int whIndex = i % whStep;      // spatial position within the plane
    const int b = i / whStep;            // batch item (was: b = whStep — out of bounds)
    const float eps = 0.0001f;
    if (i < numX)
    {
        float sum = eps;
        float maxVal = 0;
        if (useMaxVal)
        {
            // Find the channel maximum for the numerically stable shift.
            maxVal = -FLT_MAX;
            for (int c = 0; c < channels; ++c)
            {
                const float val = gpuX[whIndex + c*whStep + b*whStep*channels];
                if (val > maxVal || c == 0)
                {
                    maxVal = val;
                }
            }
        }
        for (int c = 0; c < channels; ++c)
        {
            sum += expf(gpuX[whIndex + c*whStep + b*whStep*channels] - maxVal);
        }
        for (int c = 0; c < channels; ++c)
        {
            const float val = expf(gpuX[whIndex + c*whStep + b*whStep*channels] - maxVal) / sum;
            gpuOutput[whIndex + c*whStep + b*whStep*channels] = val;
        }
    }
}
// Launches the per-position channel softmax kernel on the library CUDA
// stream; useMaxVal enables the max-shifted (numerically stable) variant.
// NOTE(review): numX is presumably batch*whStep — confirm against callers.
void ActivationsGPU::gpuActivateArrayNormChSoftMax(float *const &gpuX, const int &numX, const int &batch, const int &channels, const int &whStep, float *const &gpuOutput, const int &useMaxVal)
{
    activateArrayNormChSoftMaxKernel<<<Cuda::getGrid(numX), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(gpuX,numX,batch,channels,whStep,gpuOutput,useMaxVal);
    CUDA_CHECK(cudaPeekAtLastError());
}
}
|
ddf4050a9b85c3861f9594e63eeb056cbb687a83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
    float alpha,
    const float* __restrict__ b,
    float* __restrict__ c )
{
    // c += alpha * b for 16-element register vectors; the fixed-trip loop is
    // fully unrolled so both operands stay in registers.
    #pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];
    }
}
__global__ void
sgemm_kernel_N_T_64_16_4_16_4(
    float* __restrict__ C,
    const float* __restrict__ A,
    const float* __restrict__ B,
    int m, int n, int k,
    int lda, int ldb, int ldc,
    float alpha, float beta )
{
    /* -- MAGMA (version 1.4.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       December 2013

       Purpose:
       ========
       This routine computes
       C = alpha * A*B^T + beta * C

       B is put into shared memory
       Parameters Used:
       blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4

       This code should run for any matrix size.
       =============================================================== */
    // Launch contract: blockDim = (16, 4) -> 64 threads; each block computes
    // a 64x16 tile of C, one full 16-element tile row per thread (Cb[16]).
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * 64;   // first row of this block's C tile
    const int iby = blockIdx.y * 16;   // first column of this block's C tile
    const int idt = ty * 16 + tx;      // flat thread id in [0, 64)
    // Boundary handling: instead of branching in the inner loop, out-of-range
    // threads are redirected to a valid (duplicate) address; their redundant
    // results are discarded by the bounds checks before the final writes.
    if ( iby + tx >= n )
        B += iby + 0;
    else
        B += iby + tx;
    /*
        Taking care of boundary cases where K < 4.
    */
    if ( ty >= k )
        B += __mul24( 0, ldb );
    else
        B += __mul24( ty, ldb );
    if ( ibx + idt >= m )
        A += ibx + 0;
    else
        A += ibx + idt;
    // Column strides for prefetching 4 consecutive columns of A; clamped to 0
    // when k < 4 so the duplicate loads stay in range.
    int s2=lda, s3=2*lda, s4=3*lda;
    switch (k) {
        case 1: s2=0;   s3=0;     s4=0; break;
        case 2: s2=lda; s3=0;     s4=0; break;
        case 3: s2=lda; s3=2*lda; s4=0; break;
    }
    C += ibx + idt + __mul24( iby, ldc );
    // Software pipelining: prefetch the first 4 A values and one B value
    // before entering the main loop.
    float Ap[4] = { A[0], A[s2], A[s3], A[s4] };
    float b = B[0];
    const float *Bend = B + ldb*(k - k % 4);
    B += 4*ldb;
    A += 4*lda;
    __shared__ float Bb[4][16];        // current 4x16 slab of B^T
    float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    // Main loop over K in steps of 4 (requires at least two full steps).
    if ( k > 7 ) {
        do {
            float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
            Bb[ty][tx]=b;
            __syncthreads();
            // Prefetch the next step while consuming the current one.
            Ap[0] = A[0];
            Ap[1] = A[s2];
            Ap[2] = A[s3];
            Ap[3] = A[s4];
            b=B[0];
            saxpy( Ab[0], &Bb[0][0], Cb );
            saxpy( Ab[1], &Bb[1][0], Cb );
            saxpy( Ab[2], &Bb[2][0], Cb );
            saxpy( Ab[3], &Bb[3][0], Cb );
            A += 4*lda;
            B += 4*ldb;
            __syncthreads();
        } while (B < Bend);
    }
    // Drain the last fully-prefetched step, re-clamping pointers/strides so
    // the tail prefetch (for k % 4 leftovers) stays in range.
    if ( k > 3 ) {
        Bb[ty][tx]=b;
        int k1 = k - k % 4;
        if ( (k1+ty) >= k )
            B -= 4*ldb;
        else
            B -= 0*ldb;
        if ( (k1+0) >= k ) {s2=0;   s3=0*lda; s4=0; A -= 4*lda; } else
        if ( (k1+1) >= k ) {s2=0;   s3=0*lda; s4=0; A -= 0*lda; } else
        if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else
        if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; }
        __syncthreads();
        b=B[0];
        saxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0];
        saxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2];
        saxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3];
        saxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4];
    }
    // Tail: the remaining k % 4 rank-1 updates.
    k = k % 4;
    if ( k != 0 ) {
        __syncthreads();
        Bb[ty][tx]=b;
        __syncthreads();
        for(int i=0; i < k; i++) {
            saxpy( Ap[i], &Bb[i][0], Cb );
        }
    }
    // From here on `lda` is reused as the number of valid columns of this
    // thread's C tile row (0 when the row itself is out of range).
    if ( (iby+16)>=n) {
        lda = n-iby;
    }
    else{
        lda = 16;
    }
    if ( (ibx+idt) >= m )
        lda = 0;
    else
        lda = lda;   // intentional no-op: keeps the two branches symmetric
    // Unrolled epilogue: constant indices keep Cb[] in registers — a runtime
    // loop bound here would spill the accumulator to local memory.
    switch(lda) {
    case 16:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
        C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
        C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
        break;
    case 15:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
        C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
        break;
    case 14:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
        break;
    case 13:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        break;
    case 12:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        break;
    case 11:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        break;
    case 10:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
        C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
        C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
        break;
    case 9:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
        C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
        break;
    case 8:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
        break;
    case 7:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        break;
    case 6:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        break;
    case 5:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        break;
    case 4:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        break;
    case 3:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        break;
    case 2:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        break;
    case 1:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        break;
    case 0:
        break;
    }
}
// Host entry point (HIP): C = alpha*A*B^T + beta*C. One 64x16 C tile per
// block, 16x4 threads per block; arbitrary m/n/k handled by in-kernel
// clamping. Launched asynchronously on magma_stream.
extern "C" void
magmablas_sgemm_N_T_64_16_4_16_4(
    float *C, const float *A, const float *B,
    magma_int_t m, magma_int_t n, magma_int_t k,
    magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
    float alpha, float beta )
{
    dim3 threads( 16, 4 );
    // ceil(m/64) x ceil(n/16) blocks cover the whole C matrix.
    dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
    hipLaunchKernelGGL(( sgemm_kernel_N_T_64_16_4_16_4), dim3(grid), dim3(threads), 0, magma_stream ,
                       C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
| ddf4050a9b85c3861f9594e63eeb056cbb687a83.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
    float alpha,
    const float* __restrict__ b,
    float* __restrict__ c )
{
    // c += alpha * b for 16-element register vectors; the fixed-trip loop is
    // fully unrolled so both operands stay in registers.
    #pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];
    }
}
__global__ void
sgemm_kernel_N_T_64_16_4_16_4(
    float* __restrict__ C,
    const float* __restrict__ A,
    const float* __restrict__ B,
    int m, int n, int k,
    int lda, int ldb, int ldc,
    float alpha, float beta )
{
    /* -- MAGMA (version 1.4.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       December 2013

       Purpose:
       ========
       This routine computes
       C = alpha * A*B^T + beta * C

       B is put into shared memory
       Parameters Used:
       blk_M=64 blk_N=16 blk_K=4 nthd_x=16 nthd_y=4

       This code should run for any matrix size.
       =============================================================== */
    // Launch contract: blockDim = (16, 4) -> 64 threads; each block computes
    // a 64x16 tile of C, one full 16-element tile row per thread (Cb[16]).
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * 64;   // first row of this block's C tile
    const int iby = blockIdx.y * 16;   // first column of this block's C tile
    const int idt = ty * 16 + tx;      // flat thread id in [0, 64)
    // Boundary handling: instead of branching in the inner loop, out-of-range
    // threads are redirected to a valid (duplicate) address; their redundant
    // results are discarded by the bounds checks before the final writes.
    if ( iby + tx >= n )
        B += iby + 0;
    else
        B += iby + tx;
    /*
        Taking care of boundary cases where K < 4.
    */
    if ( ty >= k )
        B += __mul24( 0, ldb );
    else
        B += __mul24( ty, ldb );
    if ( ibx + idt >= m )
        A += ibx + 0;
    else
        A += ibx + idt;
    // Column strides for prefetching 4 consecutive columns of A; clamped to 0
    // when k < 4 so the duplicate loads stay in range.
    int s2=lda, s3=2*lda, s4=3*lda;
    switch (k) {
        case 1: s2=0;   s3=0;     s4=0; break;
        case 2: s2=lda; s3=0;     s4=0; break;
        case 3: s2=lda; s3=2*lda; s4=0; break;
    }
    C += ibx + idt + __mul24( iby, ldc );
    // Software pipelining: prefetch the first 4 A values and one B value
    // before entering the main loop.
    float Ap[4] = { A[0], A[s2], A[s3], A[s4] };
    float b = B[0];
    const float *Bend = B + ldb*(k - k % 4);
    B += 4*ldb;
    A += 4*lda;
    __shared__ float Bb[4][16];        // current 4x16 slab of B^T
    float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    // Main loop over K in steps of 4 (requires at least two full steps).
    if ( k > 7 ) {
        do {
            float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
            Bb[ty][tx]=b;
            __syncthreads();
            // Prefetch the next step while consuming the current one.
            Ap[0] = A[0];
            Ap[1] = A[s2];
            Ap[2] = A[s3];
            Ap[3] = A[s4];
            b=B[0];
            saxpy( Ab[0], &Bb[0][0], Cb );
            saxpy( Ab[1], &Bb[1][0], Cb );
            saxpy( Ab[2], &Bb[2][0], Cb );
            saxpy( Ab[3], &Bb[3][0], Cb );
            A += 4*lda;
            B += 4*ldb;
            __syncthreads();
        } while (B < Bend);
    }
    // Drain the last fully-prefetched step, re-clamping pointers/strides so
    // the tail prefetch (for k % 4 leftovers) stays in range.
    if ( k > 3 ) {
        Bb[ty][tx]=b;
        int k1 = k - k % 4;
        if ( (k1+ty) >= k )
            B -= 4*ldb;
        else
            B -= 0*ldb;
        if ( (k1+0) >= k ) {s2=0;   s3=0*lda; s4=0; A -= 4*lda; } else
        if ( (k1+1) >= k ) {s2=0;   s3=0*lda; s4=0; A -= 0*lda; } else
        if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else
        if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; }
        __syncthreads();
        b=B[0];
        saxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0];
        saxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2];
        saxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3];
        saxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4];
    }
    // Tail: the remaining k % 4 rank-1 updates.
    k = k % 4;
    if ( k != 0 ) {
        __syncthreads();
        Bb[ty][tx]=b;
        __syncthreads();
        for(int i=0; i < k; i++) {
            saxpy( Ap[i], &Bb[i][0], Cb );
        }
    }
    // From here on `lda` is reused as the number of valid columns of this
    // thread's C tile row (0 when the row itself is out of range).
    if ( (iby+16)>=n) {
        lda = n-iby;
    }
    else{
        lda = 16;
    }
    if ( (ibx+idt) >= m )
        lda = 0;
    else
        lda = lda;   // intentional no-op: keeps the two branches symmetric
    // Unrolled epilogue: constant indices keep Cb[] in registers — a runtime
    // loop bound here would spill the accumulator to local memory.
    switch(lda) {
    case 16:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
        C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
        C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
        break;
    case 15:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
        C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
        break;
    case 14:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
        break;
    case 13:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
        break;
    case 12:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
        break;
    case 11:
        C[ 0    ] = alpha * Cb[0]  + beta * C[ 0    ];
        C[ 1*ldc] = alpha * Cb[1]  + beta * C[ 1*ldc];
        C[ 2*ldc] = alpha * Cb[2]  + beta * C[ 2*ldc];
        C[ 3*ldc] = alpha * Cb[3]  + beta * C[ 3*ldc];
        C[ 4*ldc] = alpha * Cb[4]  + beta * C[ 4*ldc];
        C[ 5*ldc] = alpha * Cb[5]  + beta * C[ 5*ldc];
        C[ 6*ldc] = alpha * Cb[6]  + beta * C[ 6*ldc];
        C[ 7*ldc] = alpha * Cb[7]  + beta * C[ 7*ldc];
        C[ 8*ldc] = alpha * Cb[8]  + beta * C[ 8*ldc];
        C[ 9*ldc] = alpha * Cb[9]  + beta * C[ 9*ldc];
        C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
        break;
    case 10:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
        C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
        C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
        break;
    case 9:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
        C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
        break;
    case 8:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
        break;
    case 7:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
        break;
    case 6:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
        break;
    case 5:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
        break;
    case 4:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
        break;
    case 3:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
        break;
    case 2:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
        break;
    case 1:
        C[0    ] = alpha * Cb[0] + beta * C[0    ];
        break;
    case 0:
        break;
    }
}
// Host entry point (CUDA): C = alpha*A*B^T + beta*C. One 64x16 C tile per
// block, 16x4 threads per block; arbitrary m/n/k handled by in-kernel
// clamping. Launched asynchronously on magma_stream.
extern "C" void
magmablas_sgemm_N_T_64_16_4_16_4(
    float *C, const float *A, const float *B,
    magma_int_t m, magma_int_t n, magma_int_t k,
    magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
    float alpha, float beta )
{
    dim3 threads( 16, 4 );
    // ceil(m/64) x ceil(n/16) blocks cover the whole C matrix.
    dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
    sgemm_kernel_N_T_64_16_4_16_4<<< grid, threads, 0, magma_stream >>>
        ( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
ab0675ef65120c2b0f1b37ed25901bb2b251ab63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <time.h>
#include <math.h>
#include <algorithm>
#include <time.h>
#include "../util/cycletimer.h"
using namespace std;
typedef unsigned int uint32_t;
#define c_num 100
#define INF (100000000.0)
#define THRESHOLD (1e-7)
#define dim 3
#define PI 3.14159265f
#define BLOCK_SIZE 32
// Loads an uncompressed 24-bit BMP: fills size/width/height, allocates
// `head` (the 54-byte header, for later round-tripping) and `pixels`
// (size*dim doubles, channels interleaved as stored on disk).
// Returns 0 in all cases (errors are reported on stdout) — return value kept
// as-is for compatibility with existing callers.
// NOTE(review): BMP rows are padded to 4-byte multiples on disk; this reader
// ignores padding, so widths not divisible by 4 will skew — confirm inputs.
// Fix: the file handle was leaked on the two validation-failure paths.
int ReadBMP(string strFile, int &size,int & width, int & height , double *&pixels, char *&head) {
    FILE *fin ;
    fin=fopen(strFile.c_str(),"rb");
    //check file pointer
    if(fin == NULL) {
        cout<<"file open error!"<<endl;
        return 0;
    }
    //check file type ('BM' magic)
    short bfType;
    fread(&bfType,1,sizeof(short),fin);
    if(0x4d42!=bfType) {
        cout<<"the file is not a bmp file!"<<endl;
        fclose(fin);     // was leaked on this path
        return 0;
    }
    //get the number of pixels (width/height live at byte offset 18)
    fseek(fin,18,SEEK_SET) ;
    fread(&width,1,sizeof(int),fin);
    fread(&height,1,sizeof(int),fin);
    size = width * height ;
    //check the color depth (offset 28); only 24-bit images are supported
    fseek(fin,28,SEEK_SET) ;
    unsigned short colors ;
    fread(&colors,1,sizeof(unsigned short),fin);
    if (colors != 24 ) {
        cout << "The color map must be 24 bits" << endl ;
        fclose(fin);     // was leaked on this path
        return 0 ;
    }
    //keep the raw 54-byte header so WriteBMP can reproduce it
    fseek(fin,0,SEEK_SET);
    head = (char *)malloc(54* sizeof(char));
    fread(head,54,sizeof(char),fin);
    fseek(fin,54,SEEK_SET);
    //read the pixels as doubles, dim channels per pixel
    pixels = (double *)malloc(size * dim * sizeof(double));
    for (int i = 0; i < size; i ++) {
        for (int j = 0; j < dim; ++j) {
            unsigned char color;
            fread(&color, 1, sizeof(char), fin);
            pixels[i*dim + j] = double(color);
        }
    }
    fclose(fin);
    return 0;
}
// Writes `size` pixels (dim channels each, truncated to single bytes) after
// the 54-byte BMP header `head`. Always returns 0.
// NOTE(review): this function frees `pixels`, i.e. it takes ownership of the
// buffer — callers must not use it afterwards; confirm this is intended.
int WriteBMP(string strFile, int size, double *pixels, char *head) {
    FILE *fout ;
    if ((fout=fopen(strFile.c_str(),"wb"))==NULL) {
        cout<<"create the bmp file error!"<<endl;
        return 0;
    }
    // header first, then the raw interleaved channel bytes
    fwrite(head,54,sizeof(char),fout);
    for (int i = 0; i < size; ++i) {
        for (int j = 0 ; j < dim; j ++) {
            char temp = (char) pixels[i*dim + j];
            fwrite(&temp, 1, sizeof(char), fout);
        }
    }
    fclose(fout);
    free(pixels);
    return 0;
}
// Tiled 2-D convolution with a size x size kernel and zero padding.
// Launch contract: 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering
// WIDTH x HEIGHT; dynamic shared memory must hold size*size kernel taps plus
// a (blockDim.x + 2*(size/2))^2 haloed input tile (doubles).
// Fixes vs. the original:
//  - out-of-range threads returned BEFORE __syncthreads(): a divergent
//    barrier is undefined behaviour — the bounds check now happens after it;
//  - the guard used `w > WIDTH` / `h > HEIGHT` instead of `>=`, writing one
//    out-of-bounds row/column;
//  - the halo loads only checked the low bounds, reading past the image at
//    the right/bottom edges — all four loads are now fully guarded.
__global__ void myconv(int size, double* weight, int WIDTH,
                       int HEIGHT,
                       double* input_nopad,
                       double* output){
    int w = blockIdx.x * blockDim.x + threadIdx.x;
    int h = blockIdx.y * blockDim.y + threadIdx.y;
    int pad_size = size/2;
    extern __shared__ double s[];
    double *mykernel = s;                          // size*size filter taps
    double *mydata = (double*)&mykernel[size*size]; // haloed input tile
    // All threads in block row 0..  with threadIdx.x < size*size copy the
    // filter (redundant across rows, but harmless and race-free: same values).
    if (threadIdx.x < size*size)
        mykernel[threadIdx.x] = weight[threadIdx.x];
    int mydata_width = blockDim.x + pad_size*2;
    // Four shifted, fully-guarded loads cover the whole haloed tile;
    // anything outside the image contributes zero.
    int x0 = w - pad_size, y0 = h - pad_size;
    int x1 = w + pad_size, y1 = h + pad_size;
    mydata[threadIdx.y*mydata_width + threadIdx.x] =
        (x0 < 0 || y0 < 0 || x0 >= WIDTH || y0 >= HEIGHT) ? 0.0 : input_nopad[y0*WIDTH + x0];
    mydata[(threadIdx.y + pad_size*2)*mydata_width + threadIdx.x] =
        (x0 < 0 || x0 >= WIDTH || y1 >= HEIGHT) ? 0.0 : input_nopad[y1*WIDTH + x0];
    mydata[threadIdx.y*mydata_width + threadIdx.x + pad_size*2] =
        (y0 < 0 || x1 >= WIDTH || y0 >= HEIGHT) ? 0.0 : input_nopad[y0*WIDTH + x1];
    mydata[(threadIdx.y + pad_size*2)*mydata_width + threadIdx.x + pad_size*2] =
        (x1 >= WIDTH || y1 >= HEIGHT) ? 0.0 : input_nopad[y1*WIDTH + x1];
    __syncthreads();   // uniform: every thread reaches the barrier
    if (w >= WIDTH || h >= HEIGHT)
        return;        // divergence after the last barrier is safe
    double tmp = 0;
    for (int j = 0; j < size; j++) {
        for (int i = 0; i < size; i++) {
            tmp += mydata[(threadIdx.y + j)*mydata_width + threadIdx.x + i] * mykernel[j*size + i];
        }
    }
    output[h*WIDTH + w] = tmp;
}
// Host wrapper: uploads the image and filter, runs myconv, downloads the
// result. Fixes vs. the original:
//  - grid used `ceil(WIDTH/BLOCK_SIZE)` where the division is integer, so
//    ceil was a no-op and the last partial block was dropped — replaced with
//    proper ceiling division;
//  - device buffers were never freed (leaked on every call);
//  - dynamic shared memory was a hard-coded 2048 doubles — now computed from
//    the actual filter size and haloed tile.
void conv(int size, double* weight, int WIDTH,
          int HEIGHT,
          double* input_nopad,
          double* output){
    dim3 numBlocks((WIDTH + BLOCK_SIZE - 1)/BLOCK_SIZE, (HEIGHT + BLOCK_SIZE - 1)/BLOCK_SIZE, 1);
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    double *d_input;
    hipMalloc(&d_input, sizeof(double)*WIDTH*HEIGHT);
    hipMemcpy(d_input, input_nopad, sizeof(double)*WIDTH*HEIGHT, hipMemcpyHostToDevice);
    double *d_weight;
    hipMalloc(&d_weight, sizeof(double)*size*size);
    hipMemcpy(d_weight, weight, sizeof(double)*size*size, hipMemcpyHostToDevice);
    double *d_result;
    hipMalloc(&d_result, sizeof(double)*WIDTH*HEIGHT);
    // filter taps + (BLOCK_SIZE + 2*(size/2))^2 haloed tile, all doubles
    int haloDim = BLOCK_SIZE + 2*(size/2);
    size_t smemBytes = sizeof(double) * (size*size + haloDim*haloDim);
    hipLaunchKernelGGL(( myconv), dim3(numBlocks),dim3(threadsPerBlock),smemBytes, 0, size,d_weight,WIDTH,HEIGHT,d_input,d_result);
    // hipMemcpy on the default stream synchronises with the kernel
    hipMemcpy(output, d_result, sizeof(double)*WIDTH*HEIGHT, hipMemcpyDeviceToHost);
    hipFree(d_input);    // were leaked on every call
    hipFree(d_weight);
    hipFree(d_result);
}
/*void conv(int size, double* weight, int WIDTH,
int HEIGHT,
double* input_nopad,
double* output){
double * input = (double *)calloc((WIDTH+size-1) * (HEIGHT+size-1), sizeof(double));
int pad_size=int(size/2);
for (int j=0; j<HEIGHT; j++) {
for (int i=0; i<WIDTH; i++)
input[(j+pad_size)*(WIDTH+size-1)+i+pad_size]=input_nopad[j*WIDTH+i];
}
for (int j=0; j<HEIGHT; j++) {
for (int i=0; i<WIDTH; i++) {
double tmp = 0;
for (int jj=0; jj<size; jj++)
for (int ii=0; ii<size; ii++)
tmp += input[(j+jj)*(WIDTH+size-1) + (i+ii)] * weight[jj*size + ii];
output[j*WIDTH + i] = int(tmp);
}
}
}*/
__global__ void gray(int size, double* input, double* output){
    // Average the three interleaved channels of each pixel into one
    // grayscale value (truncated to an integer, as the original did).
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    double channelSum = input[3*idx] + input[3*idx + 1] + input[3*idx + 2];
    output[idx] = int(channelSum / 3);
}
void colorful(int size, double* input, double* output){
    // Replicate a single-channel image into interleaved 3-channel output,
    // truncating each sample to an integer value.
    for (int i = 0; i < size; ++i) {
        double v = int(input[i]);
        output[3*i]     = v;
        output[3*i + 1] = v;
        output[3*i + 2] = v;
    }
}
// Canny stages 2-3: gradient magnitude/direction from the Sobel responses,
// normalisation to [0,255], then non-maximum suppression along the gradient
// direction. Border pixels of max_suppressed are left untouched (callers
// pass a zeroed buffer).
// Fixes vs. the original: the two malloc'd scratch arrays were never freed,
// and a flat image (zero maximum) caused a division by zero.
void sobel_suppression(int width,int height,double* sobel_ximg,
                       double* sobel_yimg,double* max_suppressed){
    int size = width*height;
    double angle = 0;
    double *sobel_img = (double *)malloc(size * sizeof(double));
    double *sobel_direction = (double *)malloc(size * sizeof(double));
    for (int i = 0; i < size; ++i){
        sobel_img[i] = sqrt(sobel_ximg[i]*sobel_ximg[i] + sobel_yimg[i]*sobel_yimg[i]);
        if ((sobel_ximg[i] != 0.0) || (sobel_yimg[i] != 0.0)) {
            angle = atan2(sobel_yimg[i], sobel_ximg[i]) * 180.0 / PI;
        } else {
            angle = 0.0;
        }
        if (angle < 0)
            angle += 180;   // fold direction into [0, 180)
        sobel_direction[i] = angle;
    }
    // Peak search over a 10-pixel interior margin — presumably to avoid
    // border artifacts inflating the normalisation; assumes width/height > 20
    // (TODO confirm with callers).
    int maxmum = 0;
    for (int i = 10; i < width-10; ++i) {
        for (int j = 10; j < height-10; ++j) {
            if (sobel_img[j*width+i] > maxmum) {
                maxmum = sobel_img[j*width+i];
            }
        }
    }
    if (maxmum == 0)
        maxmum = 1;   // flat image: avoid dividing by zero below
    // Normalise to [0,255], clamping values above the interior maximum.
    for (int i = 0; i < size; ++i) {
        sobel_img[i] = int(255.0f * sobel_img[i] / maxmum);
        if (sobel_img[i] >= 255) {
            sobel_img[i] = 255;
        }
    }
    // Non-maximum suppression: keep a pixel only if it is >= both neighbours
    // along its quantised gradient direction (4 bins of 45 degrees).
    double p = 0;
    double q = 0;
    for (int i = 1; i < width-1; ++i){
        for (int j = 1; j < height-1; ++j){
            double dirAngle = sobel_direction[j*width+i];
            if (dirAngle < 22.5 || dirAngle >= 157.5) {          // horizontal
                p = sobel_img[j*width+i-1];
                q = sobel_img[j*width+i+1];
            } else if (dirAngle >= 22.5 && dirAngle < 67.5) {    // diagonal /
                p = sobel_img[(j+1)*width+i-1];
                q = sobel_img[(j-1)*width+i+1];
            } else if (dirAngle >= 67.5 && dirAngle < 112.5) {   // vertical
                p = sobel_img[(j+1)*width+i];
                q = sobel_img[(j-1)*width+i];
            } else {                                             // diagonal backslash
                p = sobel_img[(j+1)*width+i+1];
                q = sobel_img[(j-1)*width+i-1];
            }
            if (sobel_img[j*width+i] >= p && sobel_img[j*width+i] >= q)
                max_suppressed[j*width+i] = sobel_img[j*width+i];
        }
    }
    free(sobel_img);        // were leaked
    free(sobel_direction);
}
// Canny hysteresis thresholding. Pass 1 classifies every pixel as strong
// (255, value >= thre2), suppressed (0, value < thre1) or weak (marked with
// the sentinel value thre1). Pass 2 scans the interior column-by-column and
// keeps a weak pixel only if one of its 8 neighbours is strong — strengths
// promoted earlier in the scan propagate, matching the original behaviour.
void hysteresis(int width,int height,double* max_suppressed,double* hysteresis_output,int thre1,int thre2){
    const int total = width * height;
    for (int i = 0; i < total; i++) {
        const double v = max_suppressed[i];
        if (v >= thre2)
            hysteresis_output[i] = 255;
        else if (v < thre1)
            hysteresis_output[i] = 0;
        else
            hysteresis_output[i] = thre1;
    }
    for (int col = 1; col < width - 1; col++) {
        for (int row = 1; row < height - 1; row++) {
            const int pos = row * width + col;
            if (hysteresis_output[pos] != thre1)
                continue;
            bool strongNeighbour = false;
            for (int dy = -1; dy <= 1 && !strongNeighbour; dy++) {
                for (int dx = -1; dx <= 1; dx++) {
                    if (dx == 0 && dy == 0)
                        continue;
                    if (hysteresis_output[pos + dy*width + dx] == 255) {
                        strongNeighbour = true;
                        break;
                    }
                }
            }
            hysteresis_output[pos] = strongNeighbour ? 255 : 0;
        }
    }
}
/* Canny pipeline driver (HIP build): grayscale (GPU) -> 5x5 Gaussian blur
 * (CPU conv wrapper) -> Sobel x/y -> non-maximum suppression -> hysteresis.
 * Writes the final edge map (size doubles) into canny_output.
 * NOTE(review): ceil(size/512) truncates BEFORE rounding (integer division),
 * so the tail pixels of images whose size is not a multiple of 512 are never
 * grayscaled; should be (size+511)/512.  numBlocks/threadsPerBlock and
 * d_Gaussian are dead (only referenced from commented-out code), and every
 * host/device buffer allocated here is leaked.  Fixed in the CUDA twin. */
void canny(double* pixels,double *canny_output, int size, int width,int height, int thre1,int thre2 ){
dim3 numBlocks(ceil(width/BLOCK_SIZE),ceil(height/BLOCK_SIZE),1);
dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1);
/* 5x5 Gaussian kernel, normalised by 159 */
double Gaussian[]={2,4,5,4,2,4,9,12,9,4,5,12,15,12,5,4,9,12,9,4,2,4,5,4,2};
for (int i=0;i<25;i++){
Gaussian[i]=Gaussian[i]/159;
}
double *d_Gaussian;
hipMalloc(&d_Gaussian, sizeof(double)*25);
hipMemcpy(d_Gaussian,Gaussian, sizeof(double)*25,hipMemcpyHostToDevice);
double sobel_x[]={-1,0,1,-2,0,2,-1,0,1};
double sobel_y[]={1,2,1,0,0,0,-1,-2,-1};
/* double *grayimg=(double *)malloc(size* sizeof(double));
gray(size,pixels,grayimg);*/
double *d_pixels;
hipMalloc(&d_pixels, sizeof(double)*size*dim);
hipMemcpy(d_pixels,pixels, sizeof(double)*size*dim,hipMemcpyHostToDevice);
double *grayimg;
hipMalloc(&grayimg, sizeof(double)*size);
hipLaunchKernelGGL(( gray), dim3(ceil(size/512)),dim3(512), 0, 0, size,d_pixels,grayimg);
double *mygrayimg=(double *)malloc(size* sizeof(double));
hipMemcpy(mygrayimg,grayimg, sizeof(double)*size,hipMemcpyDeviceToHost);
double *gaussianimg=(double *)malloc(size* sizeof(double));
conv(5, Gaussian, width,height, mygrayimg, gaussianimg);
/* double *gaussianimg;
hipMalloc(&gaussianimg, sizeof(double)*size);
myconv<<<numBlocks,threadsPerBlock,sizeof(double)*2000>>>(5, d_Gaussian, width,height, grayimg, gaussianimg);
hipDeviceSynchronize();
hipMemcpy(canny_output,gaussianimg, sizeof(double)*width*height,hipMemcpyDeviceToHost);*/
double *sobel_ximg=(double *)malloc(size * sizeof(double));
double *sobel_yimg=(double *)malloc(size * sizeof(double));
conv(3, sobel_x, width,height, gaussianimg, sobel_ximg);
conv(3, sobel_y, width,height, gaussianimg, sobel_yimg);
double *max_suppressed=(double *) calloc(size,sizeof(double));
sobel_suppression(width,height,sobel_ximg,sobel_yimg, max_suppressed);
hysteresis(width,height,max_suppressed,canny_output,thre1,thre2);
}
/* Morphological dilation implemented as a box convolution followed by a
 * threshold: any pixel whose kernel_size x kernel_size neighbourhood contains
 * a nonzero value becomes 255, everything else keeps its (zero) value.
 * input/output are width*height single-channel images.
 * Fix vs original: the weight buffer was leaked on every call. */
void dilate(double* input, double* output, int kernel_size,int width,int height){
double* weight=(double *)malloc(kernel_size *kernel_size * sizeof(double));
/* all-ones kernel: the convolution sums the neighbourhood */
for(int i=0;i<kernel_size*kernel_size;i++)
weight[i]=1;
conv(kernel_size, weight, width,height,input,output);
free(weight);
for(int i=0;i<width*height;i++){
if(output[i]>0)
output[i]=255;
}
}
/* Box blur followed by binarisation at 127: the neighbourhood mean is
 * computed via conv with a uniform kernel, then each pixel snaps to 0 or 255.
 * Fixes vs original: the weight buffer was leaked on every call, and the
 * double weights were needlessly computed through float division. */
void blur(double* input, double* output, int kernel_size,int width,int height){
double* weight=(double *)malloc(kernel_size *kernel_size * sizeof(double));
for(int i=0;i<kernel_size*kernel_size;i++)
weight[i]=1.0/(kernel_size*kernel_size);
conv(kernel_size, weight, width,height,input,output);
free(weight);
/* binarise the smoothed image */
for(int i=0;i<width*height;i++){
if(output[i]>127){
output[i]=255;
}else{
output[i]=0;
}
}
}
/* One relaxation step of the colour flood-fill (HIP build): each thread
 * stages the four diagonal neighbours of its pixel into a shared
 * (BLOCK_SIZE+2)^2 RGB tile (off-image slots become white, 255), then a
 * still-white pixel adopts the lowest-channel-sum colour in its 3x3
 * neighbourhood; non-white pixels are copied through unchanged.
 * NOTE(review): the bounds guard below uses `>` where `>=` is needed
 * (w==width would write one column past the row), and the halo loads only
 * check one side of each bound; fixed in the CUDA twin of this kernel. */
__global__ void edge2color_kernel(double* input,double* output,int width,int height){
int w = blockIdx.x * blockDim.x + threadIdx.x;
int h = blockIdx.y * blockDim.y + threadIdx.y;
//extern __shared__ double s[];
__shared__ double mydata[BLOCK_SIZE+2][BLOCK_SIZE+2][3];
__syncthreads();
/* stage (h-1,w-1) into tile slot [ty][tx]; white if off-image */
if(w-1>=0&&h-1>=0){
mydata[threadIdx.y][threadIdx.x][0]=input[3*((h-1)*width+w-1)];
mydata[threadIdx.y][threadIdx.x][1]=input[3*((h-1)*width+w-1)+1];
mydata[threadIdx.y][threadIdx.x][2]=input[3*((h-1)*width+w-1)+2];
}
else{
mydata[threadIdx.y][threadIdx.x][0]=255;
mydata[threadIdx.y][threadIdx.x][1]=255;
mydata[threadIdx.y][threadIdx.x][2]=255;
}
/* stage (h+1,w-1) into [ty+2][tx] */
if(w-1>=0&&h+1<height){
mydata[threadIdx.y+2][threadIdx.x][0]=input[3*((h+1)*width+w-1)];
mydata[threadIdx.y+2][threadIdx.x][1]=input[3*((h+1)*width+w-1)+1];
mydata[threadIdx.y+2][threadIdx.x][2]=input[3*((h+1)*width+w-1)+2];
}
else{
mydata[threadIdx.y+2][threadIdx.x][0]=255;
mydata[threadIdx.y+2][threadIdx.x][1]=255;
mydata[threadIdx.y+2][threadIdx.x][2]=255;
}
/* stage (h-1,w+1) into [ty][tx+2] */
if(w+1<width &&h-1>=0){
mydata[threadIdx.y][threadIdx.x+2][0]=input[3*((h-1)*width+w+1)];
mydata[threadIdx.y][threadIdx.x+2][1]=input[3*((h-1)*width+w+1)+1];
mydata[threadIdx.y][threadIdx.x+2][2]=input[3*((h-1)*width+w+1)+2];
}
else{
mydata[threadIdx.y][threadIdx.x+2][0]=255;
mydata[threadIdx.y][threadIdx.x+2][1]=255;
mydata[threadIdx.y][threadIdx.x+2][2]=255;
}
/* stage (h+1,w+1) into [ty+2][tx+2] */
if(w+1<width&&h+1<height){
mydata[threadIdx.y+2][threadIdx.x+2][0]=input[3*((h+1)*width+w+1)];
mydata[threadIdx.y+2][threadIdx.x+2][1]=input[3*((h+1)*width+w+1)+1];
mydata[threadIdx.y+2][threadIdx.x+2][2]=input[3*((h+1)*width+w+1)+2];
}
else{
mydata[threadIdx.y+2][threadIdx.x+2][0]=255;
mydata[threadIdx.y+2][threadIdx.x+2][1]=255;
mydata[threadIdx.y+2][threadIdx.x+2][2]=255;
}
__syncthreads();
if (w>width || h>height)
return;
/* non-white pixel: colour already assigned, pass it through */
if (mydata[threadIdx.y+1][threadIdx.x+1][0]!=255 || mydata[threadIdx.y+1][threadIdx.x+1][1]!=255|| mydata[threadIdx.y+1][threadIdx.x+1][2]!=255){
output[dim*(h*width+w)]=input[dim*(h*width+w)];
output[dim*(h*width+w)+1]=input[dim*(h*width+w)+1];
output[dim*(h*width+w)+2]=input[dim*(h*width+w)+2];
return;
}
/* adopt the darkest (lowest channel-sum) colour in the 3x3 neighbourhood;
 * stays white when every neighbour is white */
int color1=255;
int color2=255;
int color3=255;
int sum_color=255*3;
for(int p=threadIdx.y;p<threadIdx.y+3;p++){
for(int q=threadIdx.x;q<threadIdx.x+3;q++){
int tmp_sum=mydata[p][q][0]+mydata[p][q][1]+mydata[p][q][2];
if (tmp_sum<sum_color){
sum_color=tmp_sum;
color1=mydata[p][q][0];
color2=mydata[p][q][1];
color3=mydata[p][q][2];
}
}
}
output[dim*(h*width+w)]=color1;
output[dim*(h*width+w)+1]=color2;
output[dim*(h*width+w)+2]=color3;
}
/* Host driver of the colour flood-fill (HIP build): overlays the grey edge
 * map on the original colours (clamped to 255), then repeatedly runs
 * edge2color_kernel until no fully-white pixel remains.
 * NOTE(review): numBlocks applies ceil() to an already-truncated integer
 * division, so a right/bottom strip is never processed when the dims are not
 * multiples of BLOCK_SIZE — which can keep white pixels alive and spin this
 * loop forever.  tmp is allocated but never used, and tmp/d_input/d_output
 * are all leaked.  Fixed in the CUDA twin. */
void edge2color(double* origin, double* input, double* output,int width,int height){
dim3 numBlocks(ceil(width/BLOCK_SIZE),ceil(height/BLOCK_SIZE),1);
dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1);
int size=width*height;
colorful(size,input, output);
double* tmp=(double *)malloc(size*dim * sizeof(double));
/* seed: grey edges + original colours, clamped */
for(int i=0;i<size*dim;i++){
output[i]+=origin[i];
if(output[i]>255)
output[i]=255;
}
double *d_input;
double *d_output;
hipMalloc(&d_input, sizeof(double)*size*dim);
hipMalloc(&d_output, sizeof(double)*size*dim);
while(1){
hipMemcpy(d_input,output, sizeof(double)*size*dim,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( edge2color_kernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_input,d_output,width,height);
hipDeviceSynchronize();
hipMemcpy(output,d_output, sizeof(double)*size*dim,hipMemcpyDeviceToHost);
/* for (int i = 0; i < size*dim; ++i)
{
tmp[i]=output[i];
}
for(int i=0;i<width;i++){
for (int j = 0; j < height; j++)
{
if(tmp[3*(j*width+i)]!=255 || tmp[3*(j*width+i)+1]!=255 || tmp[3*(j*width+i)+2]!=255)
continue;
int color1=0;
int color2=0;
int color3=0;
int sum_color=255*3;
for(int p=max(0,i-1);p<min(width,i+2);p++){
for(int q=max(0,j-1);q<min(height,j+2);q++){
int tmp_sum=tmp[3*(q*width+p)]+tmp[3*(q*width+p)+1]+tmp[3*(q*width+p)+2];
if (tmp_sum<sum_color){
sum_color=tmp_sum;
output[3*(j*width+i)]=tmp[3*(q*width+p)];
output[3*(j*width+i)+1]=tmp[3*(q*width+p)+1];
output[3*(j*width+i)+2]=tmp[3*(q*width+p)+2];
}
}
}
}
}*/
/* stop when no fully-white pixel remains */
int flag=false;
for(int i=0;i<size;i++){
if(output[3*i]==255 && output[3*i+1]==255 && output[3*i+2]==255){
flag=true;
break;
}
}
if(!flag)
break;
}
}
/* CLI entry point (HIP build).  Usage: prog [input.bmp [output.bmp
 * [dilate_size [blur_size]]]]; with no arguments it prompts interactively
 * (an empty line keeps the default).  Pipeline: ReadBMP -> canny -> dilate
 * -> blur -> edge2color -> WriteBMP, with coarse clock() timing.
 * NOTE(review): the timing divides by a hard-coded 1000000.0, which is only
 * correct where CLOCKS_PER_SEC == 1e6; final_output is allocated but unused;
 * no buffers are freed (acceptable at process exit, but untidy). */
int main(int argc, char ** argv){
double * pixels;
char *head;
string inputFile = "colorBlock.bmp";
string outputFile = "finalResult.bmp";
int size = 0;
int width=0;
int height=0;
int dilate_size=5;
int blur_size=5;
/* interactive mode when no arguments are given */
if (argc == 1) {
cout << "input file name: ";
char line[100];
cin.getline(line, 100);
if (strlen(line) > 0)
inputFile = string(line);
else
cout << inputFile << endl;
cout << "output file name: ";
cin.getline(line, 100);
if (strlen(line) > 0)
outputFile = string(line);
else
cout << outputFile << endl;
cout << "dilate size: ";
cin.getline(line, 100);
if (strlen(line) > 0)
dilate_size = atoi(line);
else
cout << dilate_size << endl;
cout << "blur size: ";
cin.getline(line, 100);
if (strlen(line) > 0)
blur_size = atoi(line);
else
cout << blur_size << endl;
}
/* positional arguments override defaults */
if (argc>=2) {
inputFile = argv[1];
}
if (argc>=3) {
outputFile = argv[2];
}
if (argc>=4) {
dilate_size=atoi(argv[3]);
}
if (argc>=5) {
blur_size=atoi(argv[4]);
}
ReadBMP(inputFile, size,width,height, pixels, head);
clock_t cstart=clock();
double *canny_output=(double *)malloc(size * sizeof(double));
canny(pixels,canny_output,size,width,height,5,6);
double *dilate_output=(double *)malloc(size * sizeof(double));
dilate(canny_output,dilate_output,dilate_size,width,height);
double* final_output=(double *)malloc(dim*size * sizeof(double));
double *blur_output=(double *)malloc(size * sizeof(double));
blur(dilate_output,blur_output,blur_size,width,height);
clock_t cstart_e2c=clock();
double *output=(double *)malloc(dim*size * sizeof(double));
edge2color(pixels,blur_output,output,width,height);
clock_t cend_e2c=clock();
clock_t cend=clock();
//colorful(size,canny_output, output);
/* WriteBMP also frees `output` */
WriteBMP(outputFile, size, output, head);
cout<<"total smoothing time: "<<(cend-cstart)/1000000.0 << " s" <<endl;
cout<<"edge2color time: "<<(cend_e2c-cstart_e2c)/1000000.0 << " s" <<endl;
return 0;
}
| ab0675ef65120c2b0f1b37ed25901bb2b251ab63.cu | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <time.h>
#include <math.h>
#include <algorithm>
#include <time.h>
#include "../util/cycletimer.h"
using namespace std;
typedef unsigned int uint32_t;
#define c_num 100
#define INF (100000000.0)
#define THRESHOLD (1e-7)
#define dim 3
#define PI 3.14159265f
#define BLOCK_SIZE 32
/* Reads a 24-bit uncompressed BMP file.
 * Outputs (by reference): size = width*height, width, height,
 * pixels = malloc'd size*dim doubles (channel-interleaved, BMP row order,
 * caller owns), head = malloc'd 54-byte header (caller owns).
 * Returns 0 in all cases (errors are reported on stdout); callers in this
 * file ignore the return value, so it is kept unchanged.
 * Fix vs original: the FILE* was leaked on the "not a bmp" and
 * "not 24-bit" error paths. */
int ReadBMP(string strFile, int &size,int & width, int & height , double *&pixels, char *&head) {
FILE *fin ;
fin=fopen(strFile.c_str(),"rb");
//check file pointer
if(fin == NULL) {
cout<<"file open error!"<<endl;
return 0;
}
//check file type ('BM' magic)
short bfType;
fread(&bfType,1,sizeof(short),fin);
if(0x4d42!=bfType) {
cout<<"the file is not a bmp file!"<<endl;
fclose(fin);   /* was leaked on this path */
return 0;
}
//get the image dimensions (offsets 18/22 in the header)
fseek(fin,18,SEEK_SET) ;
fread(&width,1,sizeof(int),fin);
fread(&height,1,sizeof(int),fin);
size = width * height ;
//check the color depth (offset 28); only 24-bit is supported
fseek(fin,28,SEEK_SET) ;
unsigned short colors ;
fread(&colors,1,sizeof(unsigned short),fin);
if (colors != 24 ) {
cout << "The color map must be 24 bits" << endl ;
fclose(fin);   /* was leaked on this path */
return 0 ;
}
//keep the raw 54-byte header so WriteBMP can reproduce it verbatim
fseek(fin,0,SEEK_SET);
head = (char *)malloc(54* sizeof(char));
fread(head,54,sizeof(char),fin);
fseek(fin,54,SEEK_SET);
//read the pixel data, one byte per channel, widened to double
pixels = (double *)malloc(size * dim * sizeof(double));
for (int i = 0; i < size; i ++) {
for (int j = 0; j < dim; ++j) {
unsigned char color;
fread(&color, 1, sizeof(char), fin);
pixels[i*dim + j] = double(color);
}
}
fclose(fin);
return 0;
}
/* Writes a 24-bit BMP: emits the 54-byte header verbatim, then one byte per
 * channel truncated from the double pixel buffer.  Takes ownership of
 * `pixels` and frees it before returning.  Returns 0 always. */
int WriteBMP(string strFile, int size, double *pixels, char *head) {
FILE *fout = fopen(strFile.c_str(), "wb");
if (fout == NULL) {
cout<<"create the bmp file error!"<<endl;
return 0;
}
fwrite(head, 54, sizeof(char), fout);
for (int px = 0; px < size; ++px) {
for (int ch = 0; ch < dim; ++ch) {
char byteval = (char) pixels[px*dim + ch];
fwrite(&byteval, 1, sizeof(char), fout);
}
}
fclose(fout);
free(pixels);
return 0;
}
/* Tiled 2D convolution kernel.  Dynamic shared memory holds the size*size
 * weights followed by a (blockDim.x+2*pad) wide input tile; each thread
 * stages up to four tile entries (the four corners of its padded footprint),
 * with out-of-image positions zero-filled.
 * Assumes 2*pad_size <= blockDim.x/.y so the staging covers the whole tile
 * — TODO confirm (holds for the 3/5-tap kernels and 32x32 blocks used here).
 * Fixes vs original: the bounds guard used `>` instead of `>=` (w==WIDTH
 * wrote out of bounds), it returned BEFORE the halo loads and __syncthreads
 * (skipping a barrier in divergent flow), and the halo loads only checked
 * one side of each bound, reading past the image at the bottom/right. */
__global__ void myconv(int size, double* weight, int WIDTH,
int HEIGHT,
double* input_nopad,
double* output){
int w = blockIdx.x * blockDim.x + threadIdx.x;
int h = blockIdx.y * blockDim.y + threadIdx.y;
int pad_size=int(size/2);
extern __shared__ double s[];
double *mykernel=s;
double *mydata=(double*)&mykernel[size*size];
/* all rows redundantly write the same weight values — benign */
if(threadIdx.x<size*size)
mykernel[threadIdx.x]=weight[threadIdx.x];
int mydata_width=blockDim.x+pad_size*2;
int hl = h - pad_size, hh = h + pad_size;
int wl = w - pad_size, wh = w + pad_size;
/* stage the four corner samples of this thread's padded footprint;
 * every access is guarded on BOTH sides now */
mydata[threadIdx.y*mydata_width+threadIdx.x] =
    (hl < 0 || hl > HEIGHT-1 || wl < 0 || wl > WIDTH-1) ? 0 : input_nopad[hl*WIDTH+wl];
mydata[(threadIdx.y+pad_size*2)*mydata_width+threadIdx.x] =
    (hh < 0 || hh > HEIGHT-1 || wl < 0 || wl > WIDTH-1) ? 0 : input_nopad[hh*WIDTH+wl];
mydata[threadIdx.y*mydata_width+threadIdx.x+pad_size*2] =
    (hl < 0 || hl > HEIGHT-1 || wh < 0 || wh > WIDTH-1) ? 0 : input_nopad[hl*WIDTH+wh];
mydata[(threadIdx.y+pad_size*2)*mydata_width+threadIdx.x+pad_size*2] =
    (hh < 0 || hh > HEIGHT-1 || wh < 0 || wh > WIDTH-1) ? 0 : input_nopad[hh*WIDTH+wh];
__syncthreads();
if (w >= WIDTH || h >= HEIGHT)
    return;   /* safe: no barriers after this point */
double tmp=0;
for(int j=0;j<size;j++){
for(int i=0;i<size;i++){
tmp+=mydata[(threadIdx.y+j)*mydata_width+threadIdx.x+i]*mykernel[j*size+i];
}
}
output[h*WIDTH+w]=tmp;
}
/* Host wrapper for myconv: copies the image and the size*size kernel to the
 * GPU, launches the tiled convolution, and copies the result back into
 * `output` (width*height doubles).  The copy back is a blocking cudaMemcpy,
 * which also synchronises the kernel.
 * NOTE(review): ceil() here operates on an already-truncated integer
 * division, so a right/bottom strip goes unprocessed when WIDTH/HEIGHT are
 * not multiples of BLOCK_SIZE; correcting it requires the kernel's >= bounds
 * guard first, so it is left unchanged in this edit.
 * Fixes vs original: the three device buffers were leaked on every call and
 * the shared-memory size was a hard-coded 2048 doubles; it is now computed
 * exactly (weights + padded tile). */
void conv(int size, double* weight, int WIDTH,
int HEIGHT,
double* input_nopad,
double* output){
dim3 numBlocks(ceil(WIDTH/BLOCK_SIZE),ceil(HEIGHT/BLOCK_SIZE),1);
dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1);
double *d_input;
cudaMalloc(&d_input, sizeof(double)*WIDTH*HEIGHT);
cudaMemcpy(d_input,input_nopad, sizeof(double)*WIDTH*HEIGHT,cudaMemcpyHostToDevice);
double *d_weight;
cudaMalloc(&d_weight, sizeof(double)*size*size);
cudaMemcpy(d_weight,weight, sizeof(double)*size*size,cudaMemcpyHostToDevice);
double *d_result;
cudaMalloc(&d_result, sizeof(double)*WIDTH*HEIGHT);
/* shared memory: size*size weights + (BLOCK_SIZE+2*pad)^2 tile, pad=size/2 */
size_t smem = sizeof(double)*(size_t)(size*size + (BLOCK_SIZE+size-1)*(BLOCK_SIZE+size-1));
myconv<<<numBlocks,threadsPerBlock,smem>>>(size,d_weight,WIDTH,HEIGHT,d_input,d_result);
cudaMemcpy(output,d_result, sizeof(double)*WIDTH*HEIGHT,cudaMemcpyDeviceToHost);
/* these were leaked on every call */
cudaFree(d_input);
cudaFree(d_weight);
cudaFree(d_result);
}
/*void conv(int size, double* weight, int WIDTH,
int HEIGHT,
double* input_nopad,
double* output){
double * input = (double *)calloc((WIDTH+size-1) * (HEIGHT+size-1), sizeof(double));
int pad_size=int(size/2);
for (int j=0; j<HEIGHT; j++) {
for (int i=0; i<WIDTH; i++)
input[(j+pad_size)*(WIDTH+size-1)+i+pad_size]=input_nopad[j*WIDTH+i];
}
for (int j=0; j<HEIGHT; j++) {
for (int i=0; i<WIDTH; i++) {
double tmp = 0;
for (int jj=0; jj<size; jj++)
for (int ii=0; ii<size; ii++)
tmp += input[(j+jj)*(WIDTH+size-1) + (i+ii)] * weight[jj*size + ii];
output[j*WIDTH + i] = int(tmp);
}
}
}*/
// Converts an interleaved 3-channel pixel buffer to a single-channel grey
// image: each output value is the truncated mean of the pixel's channels.
// One thread per pixel; threads past `size` do nothing.
__global__ void gray(int size, double* input, double* output){
int pixel = blockIdx.x*blockDim.x + threadIdx.x;
if (pixel >= size)
return;
double channelSum = input[3*pixel] + input[3*pixel+1] + input[3*pixel+2];
output[pixel] = int(channelSum/3);
}
// Expands a single-channel image into a 3-channel interleaved buffer by
// replicating the truncated grey value into each channel.
void colorful(int size, double* input, double* output){
for (int px = 0; px < size; ++px) {
double grey = int(input[px]);
output[3*px]   = grey;
output[3*px+1] = grey;
output[3*px+2] = grey;
}
}
/* Gradient magnitude + non-maximum suppression stage of Canny.
 * Computes per-pixel gradient magnitude (from the Sobel x/y responses) and
 * direction (degrees, folded into [0,180)), normalises magnitudes to 0..255
 * against the maximum found away from a 10-pixel border, then keeps only
 * pixels that are local maxima along their gradient direction; all other
 * entries of max_suppressed stay at their incoming value (callers pass a
 * calloc'd buffer, so those are 0).
 * Fixes vs original: the two scratch buffers were leaked, and a completely
 * flat image made `maxmum` 0 and divided by zero. */
void sobel_suppression(int width,int height,double* sobel_ximg,
double* sobel_yimg,double* max_suppressed){
int size=width*height;
double max_sobel=0;
double angle=0;
double *sobel_img=(double *)malloc(size * sizeof(double));
double *sobel_direction=(double *)malloc(size * sizeof(double));
for (int i = 0; i < size; ++i){
sobel_img[i]=sqrt(sobel_ximg[i]*sobel_ximg[i]+sobel_yimg[i]*sobel_yimg[i]);
max_sobel=sobel_img[i]>max_sobel ? sobel_img[i] : max_sobel;
if ((sobel_ximg[i] != 0.0) || (sobel_yimg[i] != 0.0)) {
angle = atan2(sobel_yimg[i], sobel_ximg[i]) * 180.0 / PI;
} else {
angle = 0.0;
}
/* fold direction into [0,180): opposite directions are equivalent */
if(angle<0)
angle+=180;
sobel_direction[i] =angle;
}
/* normalise against the max magnitude away from a 10-pixel border, so
 * border artefacts do not dominate the scaling */
int maxmum=0;
for (int i = 10; i < width-10; ++i)
{
for (int j = 10; j < height-10; ++j)
{
if(sobel_img[j*width+i]>maxmum){
maxmum=sobel_img[j*width+i];
}
}
}
if (maxmum == 0)   /* flat image: avoid division by zero */
maxmum = 1;
for (int i = 0; i < size; ++i)
{
sobel_img[i]=int(255.0f * sobel_img[i] / maxmum);
if(sobel_img[i]>=255){
sobel_img[i]=255;
}
}
/* non-maximum suppression: compare each interior pixel with its two
 * neighbours along the quantised gradient direction */
double p=0;
double q=0;
for (int i = 1; i < width-1; ++i){
for (int j = 1; j < height-1; ++j){
double angle=sobel_direction[j*width+i];
if (angle < 22.5 || angle >= 157.5) {        /* ~horizontal gradient */
p=sobel_img[j*width+i-1];
q=sobel_img[j*width+i+1];
} else if (angle >= 22.5 && angle < 67.5) {  /* ~45 degrees */
p=sobel_img[(j+1)*width+i-1];
q=sobel_img[(j-1)*width+i+1];
} else if (angle >= 67.5 && angle < 112.5) { /* ~vertical gradient */
p=sobel_img[(j+1)*width+i];
q=sobel_img[(j-1)*width+i];
} else{                                      /* ~135 degrees */
p=sobel_img[(j+1)*width+i+1];
q=sobel_img[(j-1)*width+i-1];
}
if(sobel_img[j*width+i]>=p && sobel_img[j*width+i]>=q )
max_suppressed[j*width+i]=sobel_img[j*width+i];
}
}
free(sobel_img);        /* were leaked */
free(sobel_direction);
}
/* Double-threshold hysteresis stage of Canny edge detection.
 * Pass 1: strong pixels (>= thre2) become 255, weak pixels (< thre1) become
 * 0, and anything in between is kept as the sentinel value thre1.
 * Pass 2 (interior pixels only): a sentinel pixel is promoted to 255 iff any
 * of its 8 neighbours is already 255, otherwise dropped to 0.  The scan runs
 * columns left-to-right and updates in place, so confirmations can chain
 * within one scan. */
void hysteresis(int width,int height,double* max_suppressed,double* hysteresis_output,int thre1,int thre2){
int npix = width*height;
int strong = 0;   /* strong-edge count (debug only, matches original bookkeeping) */
for (int p = 0; p < npix; ++p) {
double m = max_suppressed[p];
if (m >= thre2) {
hysteresis_output[p] = 255;
strong++;
} else {
hysteresis_output[p] = (m < thre1) ? 0 : thre1;
}
}
//printf("%d\n",strong );
/* second pass: confirm or discard the weak (sentinel) pixels */
static const int off[8][2] = {
{-1,0},{1,0},{-1,-1},{1,-1},{0,1},{0,-1},{-1,1},{1,1}
};
for (int x = 1; x < width-1; ++x) {
for (int y = 1; y < height-1; ++y) {
int p = x + y*width;
if (hysteresis_output[p] != thre1)
continue;
bool keep = false;
for (int k = 0; k < 8 && !keep; ++k)
if (hysteresis_output[p + off[k][0] + off[k][1]*width] == 255)
keep = true;
hysteresis_output[p] = keep ? 255 : 0;
}
}
}
/* Canny pipeline driver: grayscale (GPU) -> 5x5 Gaussian blur -> Sobel x/y
 * -> non-maximum suppression -> hysteresis with thresholds thre1/thre2.
 * pixels is the size*dim interleaved colour image; the final edge map
 * (size doubles) is written into canny_output.
 * Fixes vs original: ceil(size/512) truncated before rounding (integer
 * division), so the tail pixels of images whose size is not a multiple of
 * 512 were never grayscaled; dead locals (unused dim3s, d_Gaussian used only
 * by commented-out code) removed; all host/device temporaries freed. */
void canny(double* pixels,double *canny_output, int size, int width,int height, int thre1,int thre2 ){
/* 5x5 Gaussian kernel, normalised by 159 */
double Gaussian[]={2,4,5,4,2,4,9,12,9,4,5,12,15,12,5,4,9,12,9,4,2,4,5,4,2};
for (int i=0;i<25;i++){
Gaussian[i]=Gaussian[i]/159;
}
double sobel_x[]={-1,0,1,-2,0,2,-1,0,1};
double sobel_y[]={1,2,1,0,0,0,-1,-2,-1};
double *d_pixels;
cudaMalloc(&d_pixels, sizeof(double)*size*dim);
cudaMemcpy(d_pixels,pixels, sizeof(double)*size*dim,cudaMemcpyHostToDevice);
double *grayimg;
cudaMalloc(&grayimg, sizeof(double)*size);
/* ceiling division done in integer arithmetic */
gray<<<(size+511)/512,512>>>(size,d_pixels,grayimg);
double *mygrayimg=(double *)malloc(size* sizeof(double));
cudaMemcpy(mygrayimg,grayimg, sizeof(double)*size,cudaMemcpyDeviceToHost);
double *gaussianimg=(double *)malloc(size* sizeof(double));
conv(5, Gaussian, width,height, mygrayimg, gaussianimg);
double *sobel_ximg=(double *)malloc(size * sizeof(double));
double *sobel_yimg=(double *)malloc(size * sizeof(double));
conv(3, sobel_x, width,height, gaussianimg, sobel_ximg);
conv(3, sobel_y, width,height, gaussianimg, sobel_yimg);
double *max_suppressed=(double *) calloc(size,sizeof(double));
sobel_suppression(width,height,sobel_ximg,sobel_yimg, max_suppressed);
hysteresis(width,height,max_suppressed,canny_output,thre1,thre2);
/* all of these were leaked on every call */
free(mygrayimg);
free(gaussianimg);
free(sobel_ximg);
free(sobel_yimg);
free(max_suppressed);
cudaFree(d_pixels);
cudaFree(grayimg);
}
/* Morphological dilation implemented as a box convolution followed by a
 * threshold: any pixel whose kernel_size x kernel_size neighbourhood contains
 * a nonzero value becomes 255, everything else keeps its (zero) value.
 * input/output are width*height single-channel images.
 * Fix vs original: the weight buffer was leaked on every call. */
void dilate(double* input, double* output, int kernel_size,int width,int height){
double* weight=(double *)malloc(kernel_size *kernel_size * sizeof(double));
/* all-ones kernel: the convolution sums the neighbourhood */
for(int i=0;i<kernel_size*kernel_size;i++)
weight[i]=1;
conv(kernel_size, weight, width,height,input,output);
free(weight);
for(int i=0;i<width*height;i++){
if(output[i]>0)
output[i]=255;
}
}
/* Box blur followed by binarisation at 127: the neighbourhood mean is
 * computed via conv with a uniform kernel, then each pixel snaps to 0 or 255.
 * Fixes vs original: the weight buffer was leaked on every call, and the
 * double weights were needlessly computed through float division. */
void blur(double* input, double* output, int kernel_size,int width,int height){
double* weight=(double *)malloc(kernel_size *kernel_size * sizeof(double));
for(int i=0;i<kernel_size*kernel_size;i++)
weight[i]=1.0/(kernel_size*kernel_size);
conv(kernel_size, weight, width,height,input,output);
free(weight);
/* binarise the smoothed image */
for(int i=0;i<width*height;i++){
if(output[i]>127){
output[i]=255;
}else{
output[i]=0;
}
}
}
/* One relaxation step of the colour flood-fill: each thread stages the four
 * diagonal neighbours of its pixel into a shared (BLOCK_SIZE+2)^2 RGB tile
 * (off-image slots become white, 255); together the block's threads cover
 * the whole tile, including every pixel's own centre slot.  A still-white
 * pixel then adopts the lowest-channel-sum colour in its 3x3 neighbourhood;
 * non-white pixels are copied through unchanged.
 * Fixes vs original: every staging load is now bounds-checked on both sides
 * (threads past the right/bottom image edge used to read out of bounds when
 * the grid over-covers the image), and the final guard uses >= (w==width
 * used to write one column past the row).  The leading no-op __syncthreads
 * was removed; the remaining early returns come after the only barrier. */
__global__ void edge2color_kernel(double* input,double* output,int width,int height){
int w = blockIdx.x * blockDim.x + threadIdx.x;
int h = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ double mydata[BLOCK_SIZE+2][BLOCK_SIZE+2][3];
int tx = threadIdx.x, ty = threadIdx.y;
/* stage the four diagonal neighbours (h±1, w±1) into tile slots
 * [ty+dy+1][tx+dx+1]; off-image positions are treated as white */
for (int dy = -1; dy <= 1; dy += 2) {
for (int dx = -1; dx <= 1; dx += 2) {
int hh = h + dy, ww = w + dx;
int sy = ty + dy + 1, sx = tx + dx + 1;
if (hh >= 0 && hh < height && ww >= 0 && ww < width) {
mydata[sy][sx][0] = input[3*(hh*width+ww)];
mydata[sy][sx][1] = input[3*(hh*width+ww)+1];
mydata[sy][sx][2] = input[3*(hh*width+ww)+2];
} else {
mydata[sy][sx][0] = 255;
mydata[sy][sx][1] = 255;
mydata[sy][sx][2] = 255;
}
}
}
__syncthreads();
if (w >= width || h >= height)
return;   /* safe: no barriers after this point */
/* non-white pixel: colour already assigned, pass it through */
if (mydata[ty+1][tx+1][0]!=255 || mydata[ty+1][tx+1][1]!=255 || mydata[ty+1][tx+1][2]!=255){
output[dim*(h*width+w)]=input[dim*(h*width+w)];
output[dim*(h*width+w)+1]=input[dim*(h*width+w)+1];
output[dim*(h*width+w)+2]=input[dim*(h*width+w)+2];
return;
}
/* adopt the darkest (lowest channel-sum) colour in the 3x3 neighbourhood;
 * stays white (255,255,255) when every neighbour is white */
int color1=255;
int color2=255;
int color3=255;
int sum_color=255*3;
for(int p=ty;p<ty+3;p++){
for(int q=tx;q<tx+3;q++){
int tmp_sum=mydata[p][q][0]+mydata[p][q][1]+mydata[p][q][2];
if (tmp_sum<sum_color){
sum_color=tmp_sum;
color1=mydata[p][q][0];
color2=mydata[p][q][1];
color3=mydata[p][q][2];
}
}
}
output[dim*(h*width+w)]=color1;
output[dim*(h*width+w)+1]=color2;
output[dim*(h*width+w)+2]=color3;
}
/* Host driver of the colour flood-fill: overlays the grey edge map on the
 * original colours (clamped to 255), then repeatedly runs edge2color_kernel
 * — each round colours the white pixels adjacent to coloured ones — until no
 * fully-white pixel remains.
 * NOTE(review): numBlocks applies ceil() to an already-truncated integer
 * division, so a right/bottom strip is never processed when the dims are not
 * multiples of BLOCK_SIZE, which can keep white pixels alive and spin this
 * loop forever; correcting it requires the kernel's >= bounds guard first,
 * so it is left unchanged in this edit.
 * Fixes vs original: the unused `tmp` allocation (leaked every call) was
 * removed, and d_input/d_output are now freed. */
void edge2color(double* origin, double* input, double* output,int width,int height){
dim3 numBlocks(ceil(width/BLOCK_SIZE),ceil(height/BLOCK_SIZE),1);
dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1);
int size=width*height;
/* seed: grey edges + original colours, clamped to 255 */
colorful(size,input, output);
for(int i=0;i<size*dim;i++){
output[i]+=origin[i];
if(output[i]>255)
output[i]=255;
}
double *d_input;
double *d_output;
cudaMalloc(&d_input, sizeof(double)*size*dim);
cudaMalloc(&d_output, sizeof(double)*size*dim);
while(1){
cudaMemcpy(d_input,output, sizeof(double)*size*dim,cudaMemcpyHostToDevice);
edge2color_kernel<<<numBlocks,threadsPerBlock>>>(d_input,d_output,width,height);
cudaThreadSynchronize();
cudaMemcpy(output,d_output, sizeof(double)*size*dim,cudaMemcpyDeviceToHost);
/* stop once no fully-white pixel remains */
bool anyWhite = false;
for(int i=0;i<size;i++){
if(output[3*i]==255 && output[3*i+1]==255 && output[3*i+2]==255){
anyWhite = true;
break;
}
}
if(!anyWhite)
break;
}
cudaFree(d_input);   /* were leaked */
cudaFree(d_output);
}
/* CLI entry point.  Usage: prog [input.bmp [output.bmp [dilate_size
 * [blur_size]]]]; with no arguments it prompts interactively (an empty line
 * keeps the default shown).  Pipeline: ReadBMP -> canny -> dilate -> blur
 * -> edge2color -> WriteBMP, with coarse clock() timing around the stages.
 * Fixes vs original: timing now divides by CLOCKS_PER_SEC instead of a
 * hard-coded 1000000.0 (only valid where CLOCKS_PER_SEC happens to be 1e6);
 * the unused final_output allocation was removed; intermediate buffers are
 * freed (WriteBMP frees `output` itself). */
int main(int argc, char ** argv){
double * pixels;
char *head;
string inputFile = "colorBlock.bmp";
string outputFile = "finalResult.bmp";
int size = 0;
int width=0;
int height=0;
int dilate_size=5;
int blur_size=5;
/* interactive mode when no arguments are given */
if (argc == 1) {
cout << "input file name: ";
char line[100];
cin.getline(line, 100);
if (strlen(line) > 0)
inputFile = string(line);
else
cout << inputFile << endl;
cout << "output file name: ";
cin.getline(line, 100);
if (strlen(line) > 0)
outputFile = string(line);
else
cout << outputFile << endl;
cout << "dilate size: ";
cin.getline(line, 100);
if (strlen(line) > 0)
dilate_size = atoi(line);
else
cout << dilate_size << endl;
cout << "blur size: ";
cin.getline(line, 100);
if (strlen(line) > 0)
blur_size = atoi(line);
else
cout << blur_size << endl;
}
/* positional arguments override defaults */
if (argc>=2) {
inputFile = argv[1];
}
if (argc>=3) {
outputFile = argv[2];
}
if (argc>=4) {
dilate_size=atoi(argv[3]);
}
if (argc>=5) {
blur_size=atoi(argv[4]);
}
ReadBMP(inputFile, size,width,height, pixels, head);
clock_t cstart=clock();
double *canny_output=(double *)malloc(size * sizeof(double));
canny(pixels,canny_output,size,width,height,5,6);
double *dilate_output=(double *)malloc(size * sizeof(double));
dilate(canny_output,dilate_output,dilate_size,width,height);
double *blur_output=(double *)malloc(size * sizeof(double));
blur(dilate_output,blur_output,blur_size,width,height);
clock_t cstart_e2c=clock();
double *output=(double *)malloc(dim*size * sizeof(double));
edge2color(pixels,blur_output,output,width,height);
clock_t cend_e2c=clock();
clock_t cend=clock();
/* WriteBMP also frees `output` */
WriteBMP(outputFile, size, output, head);
cout<<"total smoothing time: "<<(cend-cstart)/(double)CLOCKS_PER_SEC << " s" <<endl;
cout<<"edge2color time: "<<(cend_e2c-cstart_e2c)/(double)CLOCKS_PER_SEC << " s" <<endl;
free(canny_output);
free(dilate_output);
free(blur_output);
free(pixels);
free(head);
return 0;
}
|
77c2e57415a1a9c8b6d672eceb2562e5ac6f61b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "singlemst.h"
// cluster comm t1
// cluster comm t2
// cluster comm t3
// cluster comm t4
// cluster comm changed
// cluster comm t1
// cluster comm t2
// cluster comm t3
// cluster comm t1
// cluster comm t2
// clustercomm.c LIB FUNCTION MIN 3rd ARG changed to be communicated
// clustercomm.c LIB FUNCTION MIN 3rd ARG changed to be communicated
/* Initialises MPI and the FALC communication state for this process:
 * queries rank/size, records the host name, allocates per-peer send/receive
 * buffers and size arrays on the GPU, zeroes the device-side send sizes, and
 * allocates per-peer MPI status/request arrays on the host.
 * NOTE(review): none of the hipMalloc return codes are checked. */
void FALCmpiinit(int argc,char **argv){
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &FALCrank);
MPI_Comm_size(MPI_COMM_WORLD, &FALCsize);
gethostname(FALChostname,255);
hipMalloc(&FALCsendbuff,sizeof(struct FALCbuffer )*FALCsize);
hipMalloc(&FALCrecvbuff,sizeof(struct FALCbuffer )*FALCsize);
hipMalloc(&FALCsendsize,sizeof(int)*FALCsize);
hipMalloc(&FALCrecvsize,sizeof(int)*FALCsize);
/* zero every per-peer send size on the device, one small copy per peer */
for(int i=0;i<FALCsize;i++){int temp=0;
hipMemcpy(&FALCsendsize[i],&temp,sizeof(int),hipMemcpyHostToDevice);}
FALCstatus=(MPI_Status *)malloc(sizeof(MPI_Status)*FALCsize);
FALCrequest=(MPI_Request *)malloc(sizeof(MPI_Request)*FALCsize);
}
__device__ int changed ;
;
int hchanged ;
/* Auto-generated (Falcon DSL -> HIP) kernel: resets the per-vertex
 * minimum-edge bookkeeping for one Boruvka/MST round.  FALCX is the base
 * offset for chunked launches; threads mapping past graph.localpoints do
 * nothing.  99999999 serves as the "infinity" sentinel elsewhere in this
 * generated code. */
__global__ void reset ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
((struct struct_hgraph *)(graph.extra))->Weight[id]=99999999;
((struct struct_hgraph *)(graph.extra))->minedge[id]=99999999;
((struct struct_hgraph *)(graph.extra))->minppty[id].lock=0;
}//end fun 0
}
/* Auto-generated kernel: for every edge of vertex `id`, if the two endpoints
 * lie in different components (by set.parent), atomically lower the recorded
 * minimum edge weight of BOTH component representatives via the GMIN library
 * macro (which also raises the global `changed` flag — presumably via
 * atomicMin semantics; TODO confirm against the Falcon runtime).
 * Edge layout: graph.edges stores pairs per edge — [2k].ipe is the
 * destination vertex, [2k+1].ipe the weight, as read by this generated code. */
__global__ void minset ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
int ch ;
int t1;
int t2;
int falcft0=graph.index[id+1]-graph.index[id];   /* out-degree */
int falcft1=graph.index[id];                     /* first-edge offset */
/*XX*/for(int falcft2=0;falcft2<falcft0;falcft2++){
int ut0=2*(falcft1+falcft2);
int ut1=graph.edges[ut0].ipe;      /* edge destination */
int ut2=graph.edges[ut0+1].ipe;    /* edge weight */
t1=set./**/parent[id];
((struct struct_hgraph *)(graph.extra))->minedge[id]=99999999;
t2=set./**/parent[ut1];
if( t1!=t2 ){
GMIN(&(((struct struct_hgraph *)(graph.extra))->Weight[t1]),ut2,/*xx*/changed);//rhs not null
GMIN(&(((struct struct_hgraph *)(graph.extra))->Weight[t2]),ut2,/*xx*/changed);//rhs not null
}//end
}//foreach
}//end fun 0
}
/* Auto-generated kernel intended to pick, per component, the concrete edge
 * matching the minimum weight found by minset, marking it as part of the MST
 * and recording the partner component for the union phase.
 * NOTE(review): the generator emitted invalid code in this body — it will
 * not compile as written and is flagged rather than rewritten:
 *  - `t3=struct struct_hgraph temp0;` is not a valid expression;
 *  - `hipSetDevice(0);` is a host-only runtime call inside device code;
 *  - the bare `...->getWeight(id, ut4);` discards its result instead of
 *    assigning it to t3;
 *  - `p` and `t` (used for minppty src/dst) are never declared;
 *  - `unnikri` is assigned and never used;
 *  - the atomicCAS lock compare value FALCRANK and sentinel MAX_INT-1 come
 *    from the Falcon runtime — semantics unverified from here.
 * The DSL source needs to be regenerated; left byte-identical. */
__global__ void Minedge ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
int t1;
int t2;
int t3 ;
int e;
int falcft3=graph.index[id+1]-graph.index[id];
int falcft4=graph.index[id];
/*XX*/for(int falcft5=0;falcft5<falcft3;falcft5++){
int ut3=2*(falcft4+falcft5);
int ut4=graph.edges[ut3].ipe;
int ut5=graph.edges[ut3+1].ipe;
t1=set./**/parent[id];
t2=set./**/parent[ut4];
t3=struct struct_hgraph temp0;/*xx*/
hipSetDevice(0);
((struct struct_hgraph *)(graph.extra))->getWeight(id,/*xx*/ut4);
if( t1!=t2 ){
if( t3==((struct struct_hgraph *)(graph.extra))->Weight[t1] ){
int unnikri =0;
if(atomicCAS( &(((struct struct_hgraph *)(graph.extra))->minppty[t1].lock),FALCRANK,MAX_INT-1)==FALCRANK){
e=ut3/2;
((struct struct_hgraph *)(graph.extra))->mark[e]=1;
((struct struct_hgraph *)(graph.extra))->minppty[t1].src=p;
((struct struct_hgraph *)(graph.extra))->minppty[t1].dst=t;
((struct struct_hgraph *)(graph.extra))->Weight[t1]=t3;
((struct struct_hgraph *)(graph.extra))->minppty[t1].set=t2;
}//end
}//end
if( t3==((struct struct_hgraph *)(graph.extra))->Weight[t2] ){
if(atomicCAS( &(((struct struct_hgraph *)(graph.extra))->minppty[t2].lock),FALCRANK,MAX_INT-1)==FALCRANK){
e=ut3/2;
((struct struct_hgraph *)(graph.extra))->mark[e]=1;
((struct struct_hgraph *)(graph.extra))->minppty[t2].src=p;
((struct struct_hgraph *)(graph.extra))->minppty[t2].dst=t;
((struct struct_hgraph *)(graph.extra))->Weight[t2]=t3;
((struct struct_hgraph *)(graph.extra))->minppty[t2].set=t1;
}//end
}//end
}//end
}//foreach
}//end fun 0
}
/* Auto-generated kernel: union phase of the MST round.  For each vertex,
 * reads its component representative t1 and the partner component t2 chosen
 * by Minedge; if they differ and t1's lock field is 1, merges the two
 * components via the setUnion library routine and raises the global
 * `changed` flag so the host driver runs another round.
 * NOTE(review): t4 is read but never used — presumably generator residue. */
__global__ void mstunion ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
int t1;
int t2;
int t3 , t4 ;
t1=set./**/parent[id];
t2=((struct struct_hgraph *)(graph.extra))->minppty[t1].set;
t3=((struct struct_hgraph *)(graph.extra))->minppty[t1].lock;
t4=((struct struct_hgraph *)(graph.extra))->minppty[t2].lock;
if( t1!=t2&&t3==1 ){
setUnion(t1,/*xx*/t2);
changed=1;
}//end
}//end fun 0
}
/* Auto-generated kernel: initialises the per-edge MST membership flag to the
 * "not selected" sentinel (999999999) for every edge; Minedge later sets
 * mark[e]=1 for edges chosen into the tree.  One thread per edge, chunked
 * via the FALCX base offset. */
__global__ void initmark ( GGraph graph ,int FALCX)
{
/* 1 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.nedges){
((struct struct_hgraph *)(graph.extra))->mark[id]=999999999;
}//end fun 0
}
int main ( int argc ,char * argv [ ] )
{FALCmpiinit(argc,argv);
sprintf(partitionfile,"%s",argv[2]);
GGraph hgraph ;
/*TE=2*/
/*TE=2*/
/*TE=2*/
/*TE=2*/
/*TE=2*///better to read graph in a temporary HGraph object and the clone to GGraph.
//Temporary HGraph object can be inside the GGraph class itself.
//For GGraph do not allocate offset in GPU. It is not needed in any kernel.
hgraph.readPointsN(partitionfile,FALCsize);
hgraph.makeNPartitionsMPI(argv[1],FALCrank,FALCsize);
int hgraphflag=0;
alloc_extra_hgraph(hgraph,hgraphflag,hgraph.npoints);
FALCallocbuff(FALCsendbuff,FALCsize,hgraph.remotepoints);
FALCallocbuff(FALCrecvbuff,FALCsize,hgraph.npoints);
int TPB0=1024;
;
GSet hset;
hset.allocate(hgraph.npoints);
/*TE=1*///GPU ASS
int falcvt1;
falcvt1=0;
if(hipMemcpyToSymbol(changed,&(falcvt1),sizeof(int ),0,hipMemcpyHostToDevice)!=hipSuccess)printf("memcpyerror 7");//val=1
t1!=t2&&t3==1
/*SINGLE FLAG initmark 0 */
hipSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){ hipLaunchKernelGGL((
initmark), dim3(hgraphedgekernelblocks),dim3(TPB0), 0, 0, hgraph,kk);}
hipDeviceSynchronize();
hipSetDevice(0);
while(1) {
/*TE=1*///GPU ASS
int falcvt2;
falcvt2=0;
if(hipMemcpyToSymbol(changed,&(falcvt2),sizeof(int ),0,hipMemcpyHostToDevice)!=hipSuccess)printf("memcpyerror 8");//val=1
/*SINGLE FLAG reset 0 */
hipSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){ hipLaunchKernelGGL((
reset), dim3(hgraphedgekernelblocks),dim3(TPB0), 0, 0, hipSetDevice(0);
hipLaunchKernelGGL((
reset), dim3(hgraph.localpoints/TPB0+1),dim3(TPB0), 0, 0, hgraph,hset,0);
hipDeviceSynchronize();
hipSetDevice(0);
hipLaunchKernelGGL(( sendprefix), dim3((hgraph.localpoints+hgraph.remotepoints)/1024+1),dim3(1024), 0, 0, hgraph,tempWeight,tempWeight);
hipDeviceSynchronize();
/*SINGLE FLAG minset 0 */
hipSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){ hipLaunchKernelGGL((
minset), dim3(hgraphedgekernelblocks),dim3(TPB0), 0, 0, hipSetDevice(0);
hipLaunchKernelGGL((
minset), dim3(hgraph.localpoints/TPB0+1),dim3(TPB0), 0, 0, hgraph,hset,0);
hipDeviceSynchronize();
hipSetDevice(0);
for(int kk=1;kk<FALCsize;kk++){
int offstart,offend;
offstart=hgraph.offset[kk-1];
offend=hgraph.offset[kk];hipLaunchKernelGGL((
sendbuff), dim3((offend-offstart)/1024+1),dim3(1024), 0, 0, hgraph,FALCsendsize,FALCsendbuff,tempWeight,tempWeight,kk-1,offstart,(offend-offstart));
}
hipDeviceSynchronize();
for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
hipMemcpy( &temp,&(FALCsendbuff[i]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
hipMemcpy( &temp1,&(FALCsendsize[i]),sizeof(int),hipMemcpyDeviceToHost);
MPI_Isend((temp.vid), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
} if(i>FALCrank){
hipMemcpy( &temp,&(FALCsendbuff[i-1]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
hipMemcpy( &temp1,&(FALCsendsize[i-1]),sizeof(int),hipMemcpyDeviceToHost);
MPI_Isend((temp.vid), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}}for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
hipMemcpy( &temp,&FALCrecvbuff[i],sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
MPI_Recv(temp.vid,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
} if(i>FALCrank){
hipMemcpy( &temp,&FALCrecvbuff[i-1],sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
MPI_Recv(temp.vid,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}}
FALCmsgno++;
for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
hipMemcpy( &temp,&(FALCsendbuff[i]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
hipMemcpy( &temp1,&(FALCsendsize[i]),sizeof(int),hipMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
} if(i>FALCrank){
hipMemcpy( &temp,&(FALCsendbuff[i-1]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
hipMemcpy( &temp1,&FALCsendsize[i-1],sizeof(int),hipMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}}for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
hipMemcpy( &temp,&(FALCrecvbuff[i]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i]);
} if(i>FALCrank){
hipMemcpy( &temp,&(FALCrecvbuff[i-1]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i-1]);
}
}//changed should be synchronized as it is a global var
FALCmsgno++;
if(FALCrank!=0){
for(int i=1;i< FALCsize;i++){
int temp;
hipMemcpyFromSymbol(&temp,changed,sizeof(int),0,hipMemcpyDeviceToHost);
MPI_Isend(&temp,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
}
if(FALCrank==0){
int tempchanged=0;
int temp0;
hipMemcpyFromSymbol(&temp0,changed,sizeof(int),0,hipMemcpyDeviceToHost);
for(int i=1;i<FALCsize;i++){
MPI_Recv(&tempchanged,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
temp0+=tempchanged;
hipMemcpyToSymbol(changed,&temp0,sizeof(int),0,hipMemcpyHostToDevice);
}
FALCmsgno++;
for(int i=1;i< FALCsize;i++)MPI_Isend(&temp0,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
else {
FALCmsgno++;
int temp0;
MPI_Recv(&temp0,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
hipMemcpyToSymbol(changed,&temp0,sizeof(int),0,hipMemcpyHostToDevice);
}
FALCmsgno++;
for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
hipMemcpy( &temp,&(FALCsendbuff[i]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
hipMemcpy( &temp1,&(FALCsendsize[i]),sizeof(int),hipMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
} if(i>FALCrank){
hipMemcpy( &temp,&(FALCsendbuff[i-1]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
hipMemcpy( &temp1,&FALCsendsize[i-1],sizeof(int),hipMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}}for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
hipMemcpy( &temp,&(FALCrecvbuff[i]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i]);
} if(i>FALCrank){
hipMemcpy( &temp,&(FALCrecvbuff[i-1]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i-1]);
}
}//changed should be synchronized as it is a global var
FALCmsgno++;
if(FALCrank!=0){
for(int i=1;i< FALCsize;i++){
int temp;
hipMemcpyFromSymbol(&temp,changed,sizeof(int),0,hipMemcpyDeviceToHost);
MPI_Isend(&temp,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
}
if(FALCrank==0){
int tempchanged=0;
int temp0;
hipMemcpyFromSymbol(&temp0,changed,sizeof(int),0,hipMemcpyDeviceToHost);
for(int i=1;i<FALCsize;i++){
MPI_Recv(&tempchanged,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
temp0+=tempchanged;
hipMemcpyToSymbol(changed,&temp0,sizeof(int),0,hipMemcpyHostToDevice);
}
FALCmsgno++;
for(int i=1;i< FALCsize;i++)MPI_Isend(&temp0,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
else {
FALCmsgno++;
int temp0;
MPI_Recv(&temp0,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
hipMemcpyToSymbol(changed,&temp0,sizeof(int),0,hipMemcpyHostToDevice);
}
for(int kk=0;kk<(FALCsize-1);kk++){
MPI_Get_count(&FALCstatus[kk], MPI_INT, &FALCnamount);
hipLaunchKernelGGL(( update), dim3(FALCnamount/1024+1),dim3(1024), 0, 0, hgraph,FALCrecvbuff,FALCnamount,kk);
}
hipDeviceSynchronize();
//here only master node of a point has updated value, sync it over all nodes needed. future work
for(int i=0;i<FALCsize;i++){int temp=0;
hipMemcpy(&FALCsendsize[i],&temp,sizeof(int),hipMemcpyHostToDevice);}
/*SINGLE FLAG Minedge 1 */
for(int kk=1;kk<FALCsize;kk++){
int offstart,offend;
offstart=hgraph.offset[kk-1];
offend=hgraph.offset[kk];hipLaunchKernelGGL((
sendbuffsingle), dim3((offend-offstart)/1024+1),dim3(1024), 0, 0, hgraph,FALCsendsize,FALCsendbuff,templock,kk-1,offstart,(offend-offstart));
}
hipDeviceSynchronize();
if(FALCRANK!=0){
struct FALCbuffer temp;
hipMemcpy( &temp,&(FALCsendbuff[0]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
hipMemcpy( &temp1,&(FALCsendsize[0]),sizeof(int),hipMemcpyDeviceToHost);
MPI_Isend((temp.vid), temp1, MPI_INT,0,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
MPI_Isend((temp.lock), temp1, MPI_INT, i ,FALCmsgno+1, MPI_COMM_WORLD,&FALCrequest[i]);
}
if(FALCrank==0){
for(int i=1;i<FALCsize;i++){
hipMemcpy( &temp,&(FALCsendbuff[0]),sizeof(struct FALCbuffer),hipMemcpyDeviceToHost);
int temp1;
MPI_Recv((temp.vid), hgraph.npoints, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
MPI_Recv((temp.lock), hgraph.npoints, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
//now update here
MPI_Get_count(&FALCstatus[i], MPI_INT, &FALCnamount);
hipLaunchKernelGGL(( updatesingle), dim3(FALCnamount/1024+1),dim3(1024), 0, 0, hgraph,FALCrecvbuff,FALCnamount,i);
hipDeviceSynchronize();
}
struct struct_hgraph ftt;
hipMemcpy(&ftt,( (struct struct_hgraph *)(hgraph.extra)),sizeof(struct struct struct_hgraph ),hipMemcpyDeviceToHost);
for(int i=1;i<FALCsize;i++)MPI_Send(temp.lock,hgraph.npoints,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCRequest[i]);
}
if(FALCRANK!=0){
struct struct_hgraph ftt;
hipMemcpy(&ftt,( (struct struct_hgraph *)(hgraph.extra)),sizeof(struct struct struct_hgraph ),hipMemcpyDeviceToHost);
MPI_Recv(temp.lock,hgraph.npoints,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCstatus[0]);
}
hipMemcpy(( (struct struct_hgraph *)(hgraph.extra)),sizeof(struct struct struct_hgraph ),hipMemcpyHostToDevice);
for(int i=0;i<FALCsize;i++){int temp=0;
hipMemcpy(&FALCsendsize[i],&temp,sizeof(int),hipMemcpyHostToDevice);}
hipSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){ hipLaunchKernelGGL((
Minedge), dim3(hgraphedgekernelblocks),dim3(TPB0), 0, 0, hipSetDevice(0);
hipLaunchKernelGGL((
Minedge), dim3(hgraph.localpoints/TPB0+1),dim3(TPB0), 0, 0, hgraph,hset,0);
hipDeviceSynchronize();
hipSetDevice(0);
/*SINGLE FLAG mstunion 0 */
hipSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){ hipLaunchKernelGGL((
mstunion), dim3(hgraphedgekernelblocks),dim3(TPB0), 0, 0, hipSetDevice(0);
hipLaunchKernelGGL((
mstunion), dim3(hgraph.localpoints/TPB0+1),dim3(TPB0), 0, 0, hgraph,hset,0);
hipDeviceSynchronize();
hipSetDevice(0);
hipSetDevice(0);
for(int kk=0;ii<hgraph.tot_size;kk+=hgraphpointkernelblocksize*TPB0){hipLaunchKernelGGL((
findset), dim3(hgraphpointkernelblcoksize),dim3(TPB0), 0, 0, hset,kk);
}
hipDeviceSynchronize();hipSetDevice(0);
//GPU IF STMT
int falcvt3;
if(hipMemcpyFromSymbol(&(falcvt3),changed,sizeof(int ),0,hipMemcpyDeviceToHost)!=hipSuccess)printf("memcpyerror 9");
if(falcvt3==0)break;
}//end
unsigned int mst =0;
if( ((struct struct_hgraph *)(hgraph.extra))->mark[(null)/2]==1 )mst=mst+hgraph.edges[(null)+1].ipe;
}//foreach
printf("mst cost=%lu",/*xx*/mst);//rhs not null
MPI_Finalize();
}//end fun 0
| 77c2e57415a1a9c8b6d672eceb2562e5ac6f61b7.cu |
#include "singlemst.h"
// cluster comm t1
// cluster comm t2
// cluster comm t3
// cluster comm t4
// cluster comm changed
// cluster comm t1
// cluster comm t2
// cluster comm t3
// cluster comm t1
// cluster comm t2
// clustercomm.c LIB FUNCTION MIN 3rd ARG changed to be communicated
// clustercomm.c LIB FUNCTION MIN 3rd ARG changed to be communicated
void FALCmpiinit(int argc,char **argv){
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD, &FALCrank);
MPI_Comm_size(MPI_COMM_WORLD, &FALCsize);
gethostname(FALChostname,255);
cudaMalloc(&FALCsendbuff,sizeof(struct FALCbuffer )*FALCsize);
cudaMalloc(&FALCrecvbuff,sizeof(struct FALCbuffer )*FALCsize);
cudaMalloc(&FALCsendsize,sizeof(int)*FALCsize);
cudaMalloc(&FALCrecvsize,sizeof(int)*FALCsize);
for(int i=0;i<FALCsize;i++){int temp=0;
cudaMemcpy(&FALCsendsize[i],&temp,sizeof(int),cudaMemcpyHostToDevice);}
FALCstatus=(MPI_Status *)malloc(sizeof(MPI_Status)*FALCsize);
FALCrequest=(MPI_Request *)malloc(sizeof(MPI_Request)*FALCsize);
}
__device__ int changed ;
;
int hchanged ;
__global__ void reset ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
((struct struct_hgraph *)(graph.extra))->Weight[id]=99999999;
((struct struct_hgraph *)(graph.extra))->minedge[id]=99999999;
((struct struct_hgraph *)(graph.extra))->minppty[id].lock=0;
}//end fun 0
}
__global__ void minset ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
int ch ;
int t1;
int t2;
int falcft0=graph.index[id+1]-graph.index[id];
int falcft1=graph.index[id];
/*XX*/for(int falcft2=0;falcft2<falcft0;falcft2++){
int ut0=2*(falcft1+falcft2);
int ut1=graph.edges[ut0].ipe;
int ut2=graph.edges[ut0+1].ipe;
t1=set./**/parent[id];
((struct struct_hgraph *)(graph.extra))->minedge[id]=99999999;
t2=set./**/parent[ut1];
if( t1!=t2 ){
GMIN(&(((struct struct_hgraph *)(graph.extra))->Weight[t1]),ut2,/*xx*/changed);//rhs not null
GMIN(&(((struct struct_hgraph *)(graph.extra))->Weight[t2]),ut2,/*xx*/changed);//rhs not null
}//end
}//foreach
}//end fun 0
}
__global__ void Minedge ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
int t1;
int t2;
int t3 ;
int e;
int falcft3=graph.index[id+1]-graph.index[id];
int falcft4=graph.index[id];
/*XX*/for(int falcft5=0;falcft5<falcft3;falcft5++){
int ut3=2*(falcft4+falcft5);
int ut4=graph.edges[ut3].ipe;
int ut5=graph.edges[ut3+1].ipe;
t1=set./**/parent[id];
t2=set./**/parent[ut4];
t3=struct struct_hgraph temp0;/*xx*/
cudaSetDevice(0);
((struct struct_hgraph *)(graph.extra))->getWeight(id,/*xx*/ut4);
if( t1!=t2 ){
if( t3==((struct struct_hgraph *)(graph.extra))->Weight[t1] ){
int unnikri =0;
if(atomicCAS( &(((struct struct_hgraph *)(graph.extra))->minppty[t1].lock),FALCRANK,MAX_INT-1)==FALCRANK){
e=ut3/2;
((struct struct_hgraph *)(graph.extra))->mark[e]=1;
((struct struct_hgraph *)(graph.extra))->minppty[t1].src=p;
((struct struct_hgraph *)(graph.extra))->minppty[t1].dst=t;
((struct struct_hgraph *)(graph.extra))->Weight[t1]=t3;
((struct struct_hgraph *)(graph.extra))->minppty[t1].set=t2;
}//end
}//end
if( t3==((struct struct_hgraph *)(graph.extra))->Weight[t2] ){
if(atomicCAS( &(((struct struct_hgraph *)(graph.extra))->minppty[t2].lock),FALCRANK,MAX_INT-1)==FALCRANK){
e=ut3/2;
((struct struct_hgraph *)(graph.extra))->mark[e]=1;
((struct struct_hgraph *)(graph.extra))->minppty[t2].src=p;
((struct struct_hgraph *)(graph.extra))->minppty[t2].dst=t;
((struct struct_hgraph *)(graph.extra))->Weight[t2]=t3;
((struct struct_hgraph *)(graph.extra))->minppty[t2].set=t1;
}//end
}//end
}//end
}//foreach
}//end fun 0
}
__global__ void mstunion ( GGraph graph ,GSet set ,int FALCX)
{
/* 0 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.localpoints){
int t1;
int t2;
int t3 , t4 ;
t1=set./**/parent[id];
t2=((struct struct_hgraph *)(graph.extra))->minppty[t1].set;
t3=((struct struct_hgraph *)(graph.extra))->minppty[t1].lock;
t4=((struct struct_hgraph *)(graph.extra))->minppty[t2].lock;
if( t1!=t2&&t3==1 ){
setUnion(t1,/*xx*/t2);
changed=1;
}//end
}//end fun 0
}
__global__ void initmark ( GGraph graph ,int FALCX)
{
/* 1 xx*/int id= blockIdx.x * blockDim.x + threadIdx.x+FALCX;
if( id < graph.nedges){
((struct struct_hgraph *)(graph.extra))->mark[id]=999999999;
}//end fun 0
}
int main ( int argc ,char * argv [ ] )
{FALCmpiinit(argc,argv);
sprintf(partitionfile,"%s",argv[2]);
GGraph hgraph ;
/*TE=2*/
/*TE=2*/
/*TE=2*/
/*TE=2*/
/*TE=2*///better to read graph in a temporary HGraph object and the clone to GGraph.
//Temporary HGraph object can be inside the GGraph class itself.
//For GGraph do not allocate offset in GPU. It is not needed in any kernel.
hgraph.readPointsN(partitionfile,FALCsize);
hgraph.makeNPartitionsMPI(argv[1],FALCrank,FALCsize);
int hgraphflag=0;
alloc_extra_hgraph(hgraph,hgraphflag,hgraph.npoints);
FALCallocbuff(FALCsendbuff,FALCsize,hgraph.remotepoints);
FALCallocbuff(FALCrecvbuff,FALCsize,hgraph.npoints);
int TPB0=1024;
;
GSet hset;
hset.allocate(hgraph.npoints);
/*TE=1*///GPU ASS
int falcvt1;
falcvt1=0;
if(cudaMemcpyToSymbol(changed,&(falcvt1),sizeof(int ),0,cudaMemcpyHostToDevice)!=cudaSuccess)printf("memcpyerror 7");//val=1
t1!=t2&&t3==1
/*SINGLE FLAG initmark 0 */
cudaSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){
initmark<<<hgraphedgekernelblocks,TPB0>>>(hgraph,kk);}
cudaDeviceSynchronize();
cudaSetDevice(0);
while(1) {
/*TE=1*///GPU ASS
int falcvt2;
falcvt2=0;
if(cudaMemcpyToSymbol(changed,&(falcvt2),sizeof(int ),0,cudaMemcpyHostToDevice)!=cudaSuccess)printf("memcpyerror 8");//val=1
/*SINGLE FLAG reset 0 */
cudaSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){
reset<<<hgraphedgekernelblocks,TPB0>>>(cudaSetDevice(0);
reset<<<hgraph.localpoints/TPB0+1,TPB0>>>(hgraph,hset,0);
cudaDeviceSynchronize();
cudaSetDevice(0);
sendprefix<<<(hgraph.localpoints+hgraph.remotepoints)/1024+1,1024>>>(hgraph,tempWeight,tempWeight);
cudaDeviceSynchronize();
/*SINGLE FLAG minset 0 */
cudaSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){
minset<<<hgraphedgekernelblocks,TPB0>>>(cudaSetDevice(0);
minset<<<hgraph.localpoints/TPB0+1,TPB0>>>(hgraph,hset,0);
cudaDeviceSynchronize();
cudaSetDevice(0);
for(int kk=1;kk<FALCsize;kk++){
int offstart,offend;
offstart=hgraph.offset[kk-1];
offend=hgraph.offset[kk];
sendbuff<<<(offend-offstart)/1024+1,1024>>>(hgraph,FALCsendsize,FALCsendbuff,tempWeight,tempWeight,kk-1,offstart,(offend-offstart));
}
cudaDeviceSynchronize();
for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
cudaMemcpy( &temp,&(FALCsendbuff[i]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
cudaMemcpy( &temp1,&(FALCsendsize[i]),sizeof(int),cudaMemcpyDeviceToHost);
MPI_Isend((temp.vid), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
} if(i>FALCrank){
cudaMemcpy( &temp,&(FALCsendbuff[i-1]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
cudaMemcpy( &temp1,&(FALCsendsize[i-1]),sizeof(int),cudaMemcpyDeviceToHost);
MPI_Isend((temp.vid), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}}for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
cudaMemcpy( &temp,&FALCrecvbuff[i],sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
MPI_Recv(temp.vid,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
} if(i>FALCrank){
cudaMemcpy( &temp,&FALCrecvbuff[i-1],sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
MPI_Recv(temp.vid,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
}}
FALCmsgno++;
for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
cudaMemcpy( &temp,&(FALCsendbuff[i]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
cudaMemcpy( &temp1,&(FALCsendsize[i]),sizeof(int),cudaMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
} if(i>FALCrank){
cudaMemcpy( &temp,&(FALCsendbuff[i-1]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
cudaMemcpy( &temp1,&FALCsendsize[i-1],sizeof(int),cudaMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}}for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
cudaMemcpy( &temp,&(FALCrecvbuff[i]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i]);
} if(i>FALCrank){
cudaMemcpy( &temp,&(FALCrecvbuff[i-1]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i-1]);
}
}//changed should be synchronized as it is a global var
FALCmsgno++;
if(FALCrank!=0){
for(int i=1;i< FALCsize;i++){
int temp;
cudaMemcpyFromSymbol(&temp,changed,sizeof(int),0,cudaMemcpyDeviceToHost);
MPI_Isend(&temp,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
}
if(FALCrank==0){
int tempchanged=0;
int temp0;
cudaMemcpyFromSymbol(&temp0,changed,sizeof(int),0,cudaMemcpyDeviceToHost);
for(int i=1;i<FALCsize;i++){
MPI_Recv(&tempchanged,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
temp0+=tempchanged;
cudaMemcpyToSymbol(changed,&temp0,sizeof(int),0,cudaMemcpyHostToDevice);
}
FALCmsgno++;
for(int i=1;i< FALCsize;i++)MPI_Isend(&temp0,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
else {
FALCmsgno++;
int temp0;
MPI_Recv(&temp0,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
cudaMemcpyToSymbol(changed,&temp0,sizeof(int),0,cudaMemcpyHostToDevice);
}
FALCmsgno++;
for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
cudaMemcpy( &temp,&(FALCsendbuff[i]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
cudaMemcpy( &temp1,&(FALCsendsize[i]),sizeof(int),cudaMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
} if(i>FALCrank){
cudaMemcpy( &temp,&(FALCsendbuff[i-1]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
cudaMemcpy( &temp1,&FALCsendsize[i-1],sizeof(int),cudaMemcpyDeviceToHost);
MPI_Isend((temp.Weight), temp1, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i-1]);
}}for(int i=0;i<FALCsize;i++){
struct FALCbuffer temp;
if(i<FALCrank){
cudaMemcpy( &temp,&(FALCrecvbuff[i]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i]);
} if(i>FALCrank){
cudaMemcpy( &temp,&(FALCrecvbuff[i-1]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
MPI_Recv(temp.Weight,hgraph.npoints, MPI_INT,i, FALCmsgno, MPI_COMM_WORLD,&FALCstatus[i-1]);
}
}//changed should be synchronized as it is a global var
FALCmsgno++;
if(FALCrank!=0){
for(int i=1;i< FALCsize;i++){
int temp;
cudaMemcpyFromSymbol(&temp,changed,sizeof(int),0,cudaMemcpyDeviceToHost);
MPI_Isend(&temp,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
}
if(FALCrank==0){
int tempchanged=0;
int temp0;
cudaMemcpyFromSymbol(&temp0,changed,sizeof(int),0,cudaMemcpyDeviceToHost);
for(int i=1;i<FALCsize;i++){
MPI_Recv(&tempchanged,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
temp0+=tempchanged;
cudaMemcpyToSymbol(changed,&temp0,sizeof(int),0,cudaMemcpyHostToDevice);
}
FALCmsgno++;
for(int i=1;i< FALCsize;i++)MPI_Isend(&temp0,1,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCrequest[i-1]);
}
else {
FALCmsgno++;
int temp0;
MPI_Recv(&temp0,1,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
cudaMemcpyToSymbol(changed,&temp0,sizeof(int),0,cudaMemcpyHostToDevice);
}
for(int kk=0;kk<(FALCsize-1);kk++){
MPI_Get_count(&FALCstatus[kk], MPI_INT, &FALCnamount);
update<<< FALCnamount/1024+1,1024>>>(hgraph,FALCrecvbuff,FALCnamount,kk);
}
cudaDeviceSynchronize();
//here only master node of a point has updated value, sync it over all nodes needed. future work
for(int i=0;i<FALCsize;i++){int temp=0;
cudaMemcpy(&FALCsendsize[i],&temp,sizeof(int),cudaMemcpyHostToDevice);}
/*SINGLE FLAG Minedge 1 */
for(int kk=1;kk<FALCsize;kk++){
int offstart,offend;
offstart=hgraph.offset[kk-1];
offend=hgraph.offset[kk];
sendbuffsingle<<<(offend-offstart)/1024+1,1024>>>(hgraph,FALCsendsize,FALCsendbuff,templock,kk-1,offstart,(offend-offstart));
}
cudaDeviceSynchronize();
if(FALCRANK!=0){
struct FALCbuffer temp;
cudaMemcpy( &temp,&(FALCsendbuff[0]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
cudaMemcpy( &temp1,&(FALCsendsize[0]),sizeof(int),cudaMemcpyDeviceToHost);
MPI_Isend((temp.vid), temp1, MPI_INT,0,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
MPI_Isend((temp.lock), temp1, MPI_INT, i ,FALCmsgno+1, MPI_COMM_WORLD,&FALCrequest[i]);
}
if(FALCrank==0){
for(int i=1;i<FALCsize;i++){
cudaMemcpy( &temp,&(FALCsendbuff[0]),sizeof(struct FALCbuffer),cudaMemcpyDeviceToHost);
int temp1;
MPI_Recv((temp.vid), hgraph.npoints, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
MPI_Recv((temp.lock), hgraph.npoints, MPI_INT, i ,FALCmsgno, MPI_COMM_WORLD,&FALCrequest[i]);
//now update here
MPI_Get_count(&FALCstatus[i], MPI_INT, &FALCnamount);
updatesingle<<< FALCnamount/1024+1,1024>>>(hgraph,FALCrecvbuff,FALCnamount,i);
cudaDeviceSynchronize();
}
struct struct_hgraph ftt;
cudaMemcpy(&ftt,( (struct struct_hgraph *)(hgraph.extra)),sizeof(struct struct struct_hgraph ),cudaMemcpyDeviceToHost);
for(int i=1;i<FALCsize;i++)MPI_Send(temp.lock,hgraph.npoints,MPI_INT,i,FALCmsgno,MPI_COMM_WORLD,&FALCRequest[i]);
}
if(FALCRANK!=0){
struct struct_hgraph ftt;
cudaMemcpy(&ftt,( (struct struct_hgraph *)(hgraph.extra)),sizeof(struct struct struct_hgraph ),cudaMemcpyDeviceToHost);
MPI_Recv(temp.lock,hgraph.npoints,MPI_INT,0,FALCmsgno,MPI_COMM_WORLD,&FALCstatus[0]);
}
cudaMemcpy(( (struct struct_hgraph *)(hgraph.extra)),sizeof(struct struct struct_hgraph ),cudaMemcpyHostToDevice);
for(int i=0;i<FALCsize;i++){int temp=0;
cudaMemcpy(&FALCsendsize[i],&temp,sizeof(int),cudaMemcpyHostToDevice);}
cudaSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){
Minedge<<<hgraphedgekernelblocks,TPB0>>>(cudaSetDevice(0);
Minedge<<<hgraph.localpoints/TPB0+1,TPB0>>>(hgraph,hset,0);
cudaDeviceSynchronize();
cudaSetDevice(0);
/*SINGLE FLAG mstunion 0 */
cudaSetDevice(0);
for(int kk=0;kk<hgraph.nedges;kk+=hgraphedgekernelblocks*TPB0){
mstunion<<<hgraphedgekernelblocks,TPB0>>>(cudaSetDevice(0);
mstunion<<<hgraph.localpoints/TPB0+1,TPB0>>>(hgraph,hset,0);
cudaDeviceSynchronize();
cudaSetDevice(0);
cudaSetDevice(0);
for(int kk=0;ii<hgraph.tot_size;kk+=hgraphpointkernelblocksize*TPB0){
findset<<<hgraphpointkernelblcoksize,TPB0>>>( hset,kk);
}
cudaDeviceSynchronize();cudaSetDevice(0);
//GPU IF STMT
int falcvt3;
if(cudaMemcpyFromSymbol(&(falcvt3),changed,sizeof(int ),0,cudaMemcpyDeviceToHost)!=cudaSuccess)printf("memcpyerror 9");
if(falcvt3==0)break;
}//end
unsigned int mst =0;
if( ((struct struct_hgraph *)(hgraph.extra))->mark[(null)/2]==1 )mst=mst+hgraph.edges[(null)+1].ipe;
}//foreach
printf("mst cost=%lu",/*xx*/mst);//rhs not null
MPI_Finalize();
}//end fun 0
|
5f3792fe6f35f1432f5a72795894292f6220d5da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 University of Basel, Medical Image Analysis Center
*
* Author: Benedikt Bitterli (benedikt.bitterli@unibas.ch)
* Christoph Jud (christoph.jud@unibas.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GpuEvaluator.h"
#include "CudaUtils.h"
#include "GpuEvaluatorDisplacementField.cu"
#include "GpuEvaluatorMSE.cu"
#include "GpuEvaluatorNCC.cu"
#include "GpuEvaluatorLCC.cu"
#include "GpuEvaluatorRegularizers.cu"
GpuKernelEvaluator::GpuKernelEvaluator(int numParameters,
int subsample,
int subsampleNeighborhood,
Kernel kernel,
BsplineImage<ScalarType, SpaceDimensions> movingImage,
ImageNearest<ScalarType, SpaceDimensions> fixedImage,
ImageNearest<VecF, SpaceDimensions> cpImage,
ImageNearest<ScalarType, SpaceDimensions> cpwImage)
: _numberOfParameters(numParameters),
_subsample(subsample),
_subsampleNeighborhood(subsampleNeighborhood),
_regRKHS(0),
_regRD(0),
_regPG(0),
_regRDScaling(1.0),
_regPGScaling(1.0),
_subsampledSize(fixedImage.size()/_subsample),
_displacementFieldPtr(nullptr),
_diffs(new ScalarType[fixedImage.size().product()]),
_fvalues(new ScalarType[fixedImage.size().product()]),
_mvalues(new ScalarType[fixedImage.size().product()]),
// _ffvalues(new ScalarType[fixedImage.size().product()]),
// _mmvalues(new ScalarType[fixedImage.size().product()]),
// _fmvalues(new ScalarType[fixedImage.size().product()]),
_ccvalues(new ScalarType[fixedImage.size().product()]),
_derivativesF(new ScalarType[numParameters]),
_derivativesM(new ScalarType[numParameters]),
_numberOfPixelsCounted(new int[fixedImage.size().product()]),
_cubeOffsets(new VecI[_subsampledSize.product()]),
_deviceDerivatives(allocCuda<ScalarType>(numParameters)),
_deviceDerivativesF(allocCuda<ScalarType>(numParameters)),
_deviceDerivativesM(allocCuda<ScalarType>(numParameters)),
_deviceDiffs(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceFvalues(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceMvalues(allocCuda<ScalarType>(fixedImage.size().product())),
// _deviceFFvalues(allocCuda<ScalarType>(fixedImage.size().product())),
// _deviceMMvalues(allocCuda<ScalarType>(fixedImage.size().product())),
// _deviceFMvalues(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceCCvalues(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceNumberOfPixelsCounted(allocCuda<int>(fixedImage.size().product())),
_deviceGradients(allocCuda<VecF>(fixedImage.size().product())),
_deviceMovingImage(allocCuda<ScalarType>(movingImage.size().product(), movingImage.coeffs())),
_deviceFixedImage(allocCuda<ScalarType>(fixedImage.size().product(), fixedImage.data())),
_deviceCpImage(allocCuda<VecF>(cpImage.size().product())),
_deviceCpwImage(allocCuda<ScalarType>(cpwImage.size().product())),
_deviceWImage(allocCuda<ScalarType>(kernel.wImage().size().product(), kernel.wImage().data())),
_deviceWTensor(allocCuda<MatF>(kernel.wTensor().size().product(), kernel.wTensor().data())),
_deviceCubeOffsets(_subsample > 1 ? allocCuda<VecI>(_subsampledSize.product()) : nullptr),
_kernel(kernel),
_movingImage(movingImage),
_fixedImage(fixedImage),
_cpImage(cpImage),
_cpwImage(cpwImage),
is_evaluated_once(false)
{
_movingImage.assignCoeffs(_deviceMovingImage.get());
_fixedImage.assignData(_deviceFixedImage.get());
_cpImage.assignData(_deviceCpImage.get());
_cpwImage.assignData(_deviceCpwImage.get());
_kernel.wImage().assignData(_deviceWImage.get());
_kernel.wTensor().assignData(_deviceWTensor.get());
}
GpuKernelEvaluator::EvaluationResult
GpuKernelEvaluator::getValue(MEASURE metric,
const VecF *cpData,
const ScalarType *cpwData,
ImageNearest<VecF, SpaceDimensions> displacementField,
bool do_resampling)
{
VecI fixedSize = _fixedImage.size();
VecI paramSize = _cpImage.size();
cudaCheck(hipMemcpy( _deviceCpImage.get(), cpData, _cpImage.size().product()*sizeof(VecF), hipMemcpyHostToDevice), __FILE__, __LINE__);
cudaCheck(hipMemcpy(_deviceCpwImage.get(), cpwData, _cpwImage.size().product()*sizeof(ScalarType), hipMemcpyHostToDevice), __FILE__, __LINE__);
if (_subsample > 1) {
int numTexels = _subsampledSize.product();
if(!is_evaluated_once || do_resampling){
for (int i = 0; i < numTexels; ++i)
_cubeOffsets[i] = VecI(_rng.nextV<SpaceDimensions>()*static_cast<ScalarType>(_subsample));
is_evaluated_once = true;
}
cudaCheck(hipMemcpy(_deviceCubeOffsets.get(), _cubeOffsets.get(), _subsampledSize.product()*sizeof(VecI), hipMemcpyHostToDevice), __FILE__, __LINE__);
}
if (displacementField.data() != _displacementFieldPtr) {
_displacementFieldPtr = displacementField.data();
_displacementField = displacementField;
_deviceDisplacementField = allocCuda<VecF>(displacementField.size().product());
cudaCheck(hipMemcpy(_deviceDisplacementField.get(), _displacementFieldPtr, displacementField.size().product()*sizeof(VecF), hipMemcpyHostToDevice), __FILE__, __LINE__);
_displacementField.assignData(_deviceDisplacementField.get());
}
GpuParams params {
paramSize,
_subsampledSize,
_subsample,
_subsampleNeighborhood,
_deviceCubeOffsets.get(),
_kernel,
_movingImage,
_fixedImage,
_cpImage,
_cpwImage,
_displacementField,
_displacementFieldPtr != nullptr,
_regRKHS,
_regRD,
_regPG,
_regRDScaling,
_regPGScaling,
_deviceGradients.get(),
nullptr, // displacements
_deviceNumberOfPixelsCounted.get(),
nullptr, // derivatives
_deviceBilateralRegularization.get()
};
//------------------------------------------------------------------------------------------------------
// calculating loss function values
#if SpaceDimensions == 3
dim3 threadsPerBlock(4, 4, 4);
#else
dim3 threadsPerBlock(16, 16);
#endif
dim3 blocksPerGrid = dim3(
(_subsampledSize[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
(_subsampledSize[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
(_subsampledSize[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
1
#endif
);
ScalarType measure = 0.0;
int pixelsCounted = 0;
if(metric == MEASURE::MSE){
GpuMSEParams mse_params{
_deviceDiffs.get(),
};
if (_subsample == 1){
resolveValueMSE<false>(blocksPerGrid, threadsPerBlock, params, mse_params);
}
else{
resolveValueMSE<true> (blocksPerGrid, threadsPerBlock, params, mse_params);
}
cudaCheck(hipMemcpy(_diffs.get(), _deviceDiffs.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
ScalarType mse = 0.0;
pixelsCounted = 0;
for (int i = 0; i < _subsampledSize.product(); ++i) {
mse += _diffs[i]*_diffs[i]; // TODO: could be done with only fvalues and mvalues. diffs is not needed.
pixelsCounted += _numberOfPixelsCounted[i];
}
mse /= pixelsCounted; // pixelsCounted should be checked to zero
measure = mse;
}
else if(metric == MEASURE::NCC){
GpuNCCParams ncc_params{
0, // mean_mf;
0, // mean_mm;
_deviceFvalues.get(),
_deviceMvalues.get(),
// _deviceFFvalues.get(),
// _deviceMMvalues.get(),
// _deviceFMvalues.get(),
nullptr,
nullptr
};
if (_subsample == 1){
resolveValueNCC<false>(blocksPerGrid, threadsPerBlock, params, ncc_params);
}
else{
resolveValueNCC<true> (blocksPerGrid, threadsPerBlock, params, ncc_params);
}
cudaCheck(hipMemcpy(_fvalues.get(), _deviceFvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipMemcpy(_mvalues.get(), _deviceMvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
// cudaCheck(hipMemcpy(_ffvalues.get(), _deviceFFvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
// cudaCheck(hipMemcpy(_mmvalues.get(), _deviceMMvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
// cudaCheck(hipMemcpy(_fmvalues.get(), _deviceFMvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
ScalarType denom = 1.0;
ScalarType smm = 0.0;
ScalarType sfm = 0.0;
ScalarType sf = 0.0;
ScalarType sm = 0.0;
ScalarType sff = 0.0;
smm = 0.0;
sfm = 0.0;
pixelsCounted = 0;
for (int i = 0; i < _subsampledSize.product(); ++i) {
// sff += _ffvalues[i];
// smm += _mmvalues[i];
// sfm += _fmvalues[i];
sff += _fvalues[i]*_fvalues[i];
smm += _mvalues[i]*_mvalues[i];
sfm += _fvalues[i]*_mvalues[i];
sf += _fvalues[i];
sm += _mvalues[i];
pixelsCounted += _numberOfPixelsCounted[i];
}
// subtract mean
sff -= (sf*sf/pixelsCounted);
smm -= (sm*sm/pixelsCounted);
sfm -= (sf*sm/pixelsCounted);
denom = -1.0 * std::sqrt(sff*smm);
ScalarType ncc = 0.0;
if(denom!=0)
ncc = sfm/denom;
measure = ncc;
}
else if(metric == MEASURE::LCC){
GpuLCCParams lcc_params{
_deviceCCvalues.get(),
};
if (_subsample == 1){
#ifndef EXEC_SINGLE_THREAD
resolveValueLCC<false>(blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
resolveValueLCC<false>(1, 1, params, lcc_params);
#endif
}
else{
#ifndef EXEC_SINGLE_THREAD
resolveValueLCC<true> (blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
resolveValueLCC<true> (1, 1, params, lcc_params);
#endif
}
cudaCheck(hipMemcpy( _ccvalues.get(), _deviceCCvalues .get(), fixedSize.product()*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
ScalarType lcc = 0.0;
pixelsCounted = 0;
for (int i = 0; i < _subsampledSize.product(); ++i) {
// ScalarType v = _ccvalues[i];
// if(std::isfinite(v))
// lcc += v;
lcc += _ccvalues[i];
pixelsCounted += _numberOfPixelsCounted[i];
}
lcc /= pixelsCounted; // pixelsCounted should be checked to zero
measure = lcc;
}
//------------------------------------------------------------------------------------------------------
// calculating bilateral regularizer
// copy current derivatives to gpu
ScalarType reg_value_rkhs = 0;
ScalarType reg_value_rd = 0;
ScalarType reg_value_pg = 0;
/** RKHS norm: c'Kc */
if(_regRKHS>0){
if (params.kernel.useWeightImage())
hipLaunchKernelGGL(( regularizerRKHS<true, false, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
else if (params.kernel.useWeightTensor())
hipLaunchKernelGGL(( regularizerRKHS<false, true, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
else
hipLaunchKernelGGL(( regularizerRKHS<false, false, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
cudaCheck(hipMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
reg_value_rkhs += _bilateralRegularization[i];
}
if(_regRD>0){
if (params.kernel.useWeightImage())
hipLaunchKernelGGL(( regularizerRD<true, false, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
else if (params.kernel.useWeightTensor())
hipLaunchKernelGGL(( regularizerRD<false, true, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
else
hipLaunchKernelGGL(( regularizerRD<false, false, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
cudaCheck(hipMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
reg_value_rd += _bilateralRegularization[i];
}
if(_regPG){
if (params.kernel.useWeightImage())
hipLaunchKernelGGL(( regularizerPG<true, false, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
else if (params.kernel.useWeightTensor())
hipLaunchKernelGGL(( regularizerPG<false, true, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
else
hipLaunchKernelGGL(( regularizerPG<false, false, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
cudaCheck(hipMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
reg_value_pg += _bilateralRegularization[i];
}
return EvaluationResult{measure, reg_value_rkhs, reg_value_rd, reg_value_pg, pixelsCounted};
}
// TODO
GpuKernelEvaluator::EvaluationResult
GpuKernelEvaluator::getValueAndDerivative(MEASURE metric,
                                          const VecF *cpData,
                                          const ScalarType *cpwData,
                                          ImageNearest<VecF, SpaceDimensions> displacementField,
                                          ScalarType *derivatives,
                                          bool do_resampling)
{
    // Evaluates the selected similarity measure (MSE/NCC/LCC) and its derivative
    // w.r.t. the control point parameters, plus the bilateral regularizer terms.
    //
    // cpData/cpwData:    host control point values/weights, uploaded here.
    // displacementField: optional precomputed displacement field (may carry a null
    //                    data pointer, meaning "no field").
    // derivatives:       host output buffer of _numberOfParameters scalars.
    // do_resampling:     when subsampling, re-draw the random cube offsets.
    VecI fixedSize = _fixedImage.size();
    VecI paramSize = _cpImage.size();
    // Upload the current control point values and weights.
    cudaCheck(hipMemcpy( _deviceCpImage.get(), cpData, _cpImage.size().product()*sizeof(VecF), hipMemcpyHostToDevice), __FILE__, __LINE__);
    cudaCheck(hipMemcpy(_deviceCpwImage.get(), cpwData, _cpwImage.size().product()*sizeof(ScalarType), hipMemcpyHostToDevice), __FILE__, __LINE__);
    if (_subsample > 1) {
        // Stochastic subsampling: one random offset per subsampled cube, drawn on
        // the host and uploaded. Offsets are redrawn only on the first evaluation
        // or when the caller requests resampling.
        int numTexels = _subsampledSize.product();
        if(!is_evaluated_once || do_resampling){
            for (int i = 0; i < numTexels; ++i)
                _cubeOffsets[i] = VecI(_rng.nextV<SpaceDimensions>()*static_cast<ScalarType>(_subsample));
            is_evaluated_once = true;
        }
        cudaCheck(hipMemcpy(_deviceCubeOffsets.get(), _cubeOffsets.get(), _subsampledSize.product()*sizeof(VecI), hipMemcpyHostToDevice), __FILE__, __LINE__);
    }
    if (displacementField.data() != _displacementFieldPtr) {
        // Displacement field changed: refresh the device copy. Guard against a
        // null data pointer (meaning "no field") — same null-safe pattern as in
        // evaluateDisplacementField; previously a null field would have been
        // memcpy'd from a null host pointer.
        _displacementFieldPtr = displacementField.data();
        _displacementField = displacementField;
        _deviceDisplacementField.reset();
        if (_displacementFieldPtr) {
            _deviceDisplacementField = allocCuda<VecF>(displacementField.size().product());
            cudaCheck(hipMemcpy(_deviceDisplacementField.get(), _displacementFieldPtr, displacementField.size().product()*sizeof(VecF), hipMemcpyHostToDevice), __FILE__, __LINE__);
        }
        _displacementField.assignData(_deviceDisplacementField.get());
    }
    // NOTE: aggregate initialization — the value order below must match the
    // declaration order of GpuParams exactly.
    GpuParams params {
        paramSize,
        _subsampledSize,
        _subsample,
        _subsampleNeighborhood,
        _deviceCubeOffsets.get(),
        _kernel,
        _movingImage,
        _fixedImage,
        _cpImage,
        _cpwImage,
        _displacementField,
        _displacementFieldPtr != nullptr,
        _regRKHS,
        _regRD,
        _regPG,
        _regRDScaling,
        _regPGScaling,
        _deviceGradients.get(),
        nullptr,                           // displacements
        _deviceNumberOfPixelsCounted.get(),
        _deviceDerivatives.get(),
        _deviceBilateralRegularization.get()
    };
    //------------------------------------------------------------------------------------------------------
    // calculating loss function values
#if SpaceDimensions == 3
    dim3 threadsPerBlock(4, 4, 4);
#else
    dim3 threadsPerBlock(16, 16);
#endif
    // Ceil-divide the (subsampled) image extent by the block size.
    dim3 blocksPerGrid = dim3(
        (_subsampledSize[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
        (_subsampledSize[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
        (_subsampledSize[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
        1
#endif
    );
    ScalarType measure = 0.0;
    int pixelsCounted = 0;
    // NCC statistics from the value phase, reused below by the derivative phase.
    ScalarType denom = 1.0;
    ScalarType smm = 0.0;
    ScalarType sfm = 0.0;
    ScalarType mean_mf = 0.0;
    ScalarType mean_mm = 0.0;
    if(metric == MEASURE::MSE){
        GpuMSEParams mse_params{
            _deviceDiffs.get(),
        };
        if (_subsample == 1){
            resolveValueMSE<false>(blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        else{
            resolveValueMSE<true> (blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        cudaCheck(hipMemcpy(_diffs.get(), _deviceDiffs.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        // Host-side reduction of the per-pixel differences.
        ScalarType mse = 0.0;
        pixelsCounted = 0;
        for (int i = 0; i < _subsampledSize.product(); ++i) {
            mse += _diffs[i]*_diffs[i]; // TODO: could be done with only fvalues and mvalues. diffs is not needed.
            pixelsCounted += _numberOfPixelsCounted[i];
        }
        mse /= pixelsCounted; // pixelsCounted should be checked to zero
        measure = mse;
    }
    else if(metric == MEASURE::NCC){
        GpuNCCParams ncc_params{
            0, // mean_mf;
            0, // mean_mm;
            _deviceFvalues.get(),
            _deviceMvalues.get(),
            // _deviceFFvalues.get(),
            // _deviceMMvalues.get(),
            // _deviceFMvalues.get(),
            _deviceDerivativesF.get(),
            _deviceDerivativesM.get()
        };
        if (_subsample == 1){
            resolveValueNCC<false>(blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        else{
            resolveValueNCC<true>(blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        cudaCheck(hipMemcpy(_fvalues.get(), _deviceFvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipMemcpy(_mvalues.get(), _deviceMvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        // cudaCheck(hipMemcpy(_ffvalues.get(), _deviceFFvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        // cudaCheck(hipMemcpy(_mmvalues.get(), _deviceMMvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        // cudaCheck(hipMemcpy(_fmvalues.get(), _deviceFMvalues.get(), fixedSize.product()*sizeof( ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        // Host-side accumulation of the NCC sufficient statistics.
        ScalarType sf = 0.0;
        ScalarType sm = 0.0;
        ScalarType sff = 0.0;
        smm = 0.0;
        sfm = 0.0;
        pixelsCounted = 0;
        for (int i = 0; i < _subsampledSize.product(); ++i) {
            // sff += _ffvalues[i];
            // smm += _mmvalues[i];
            // sfm += _fmvalues[i];
            sff += _fvalues[i]*_fvalues[i];
            smm += _mvalues[i]*_mvalues[i];
            sfm += _fvalues[i]*_mvalues[i];
            sf += _fvalues[i];
            sm += _mvalues[i];
            pixelsCounted += _numberOfPixelsCounted[i];
        }
        // subtract mean (NOTE(review): divides by pixelsCounted without a zero
        // check — produces inf/nan when nothing was counted)
        sff -= (sf*sf/pixelsCounted);
        smm -= (sm*sm/pixelsCounted);
        sfm -= (sf*sm/pixelsCounted);
        // Negative sign: measure is minimized, so perfect correlation maps to -1.
        denom = -1.0 * std::sqrt(sff*smm);
        ScalarType ncc = 0.0;
        if(denom!=0)
            ncc = sfm/denom;
        measure = ncc;
        // save mean f and m values in GPU parameter struct
        // since they are needed in calculating the derivative
        mean_mf = sf/pixelsCounted;
        mean_mm = sm/pixelsCounted;
    }
    else if(metric == MEASURE::LCC){
        GpuLCCParams lcc_params{
            _deviceCCvalues.get(),
        };
        if (_subsample == 1){
#ifndef EXEC_SINGLE_THREAD
            resolveValueLCC<false>(blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
            resolveValueLCC<false>(1, 1, params, lcc_params);
#endif
        }
        else{
#ifndef EXEC_SINGLE_THREAD
            resolveValueLCC<true>(blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
            resolveValueLCC<true>(1, 1, params, lcc_params);
#endif
        }
        cudaCheck(hipMemcpy( _ccvalues.get(), _deviceCCvalues .get(), fixedSize.product()*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        ScalarType lcc = 0.0;
        pixelsCounted = 0;
        for (int i = 0; i < _subsampledSize.product(); ++i) {
            // ScalarType v = _ccvalues[i];
            // if(std::isfinite(v))
            //     lcc += v;
            lcc += _ccvalues[i];
            pixelsCounted += _numberOfPixelsCounted[i];
        }
        lcc /= pixelsCounted; // pixelsCounted should be checked to zero
        measure = lcc;
    }
    //------------------------------------------------------------------------------------------------------
    // calculating derivatives
    cudaCheck(hipMemset(_deviceDerivatives.get(), 0, _numberOfParameters*sizeof(ScalarType)), __FILE__, __LINE__);
    // The derivative kernels iterate over control point space, not image space.
    blocksPerGrid = dim3(
        (paramSize[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
        (paramSize[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
        (paramSize[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
        1
#endif
    );
    if(metric == MEASURE::MSE){
        GpuMSEParams mse_params{
            _deviceDiffs.get(),
        };
        if (_subsample == 1){
            resolveDerivativeMSE<false>(blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        else{
            resolveDerivativeMSE<true> (blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        cudaCheck(hipMemcpy(derivatives, _deviceDerivatives.get(), _numberOfParameters*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters; ++i)
            derivatives[i] /= pixelsCounted;
    }
    else if(metric == MEASURE::NCC){
        GpuNCCParams ncc_params{
            mean_mf,
            mean_mm,
            _deviceFvalues.get(),
            _deviceMvalues.get(),
            // _deviceFFvalues.get(),
            // _deviceMMvalues.get(),
            // _deviceFMvalues.get(),
            _deviceDerivativesF.get(),
            _deviceDerivativesM.get()
        };
        if (_subsample == 1){
            resolveDerivativeNCC<false>(blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        else{
            resolveDerivativeNCC<true> (blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        cudaCheck(hipMemcpy(_derivativesF.get(), _deviceDerivativesF.get(), _numberOfParameters*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipMemcpy(_derivativesM.get(), _deviceDerivativesM.get(), _numberOfParameters*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        // Combine the fixed/moving partials with the statistics from the value
        // phase (denom == 0 implies smm == 0, so guard both divisions together).
        for (int i = 0; i < _numberOfParameters; ++i){
            if(denom!=0)
                derivatives[i] = ( _derivativesF[i] - (sfm/smm)*_derivativesM[i] ) / denom;
            else
                derivatives[i] = 0.0;
        }
    }
    else if(metric == MEASURE::LCC){
        if (_subsample == 1){
            resolveDerivativeLCC<false>(blocksPerGrid, threadsPerBlock, params);
        }
        else{
            resolveDerivativeLCC<true> (blocksPerGrid, threadsPerBlock, params);
        }
        cudaCheck(hipMemcpy(derivatives, _deviceDerivatives.get(), _numberOfParameters*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters; ++i)
            derivatives[i] /= pixelsCounted;
    }
    //------------------------------------------------------------------------------------------------------
    // calculating bilateral regularizer
    // copy current derivatives to gpu
    if(_regRKHS>0 || _regRD>0 || _regPG>0){
        cudaCheck(hipMemcpy(_deviceDerivatives.get(), derivatives, _numberOfParameters*sizeof(ScalarType), hipMemcpyHostToDevice), __FILE__, __LINE__);
    }
    ScalarType reg_value_rkhs = 0;
    ScalarType reg_value_rd = 0;
    ScalarType reg_value_pg = 0;
    /** RKHS norm: c'Kc */
    if(_regRKHS>0){
        if (params.kernel.useWeightImage())
            hipLaunchKernelGGL(( regularizerRKHS<true, false, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        else if (params.kernel.useWeightTensor())
            hipLaunchKernelGGL(( regularizerRKHS<false, true, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        else
            hipLaunchKernelGGL(( regularizerRKHS<false, false, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        cudaCheck(hipMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
            reg_value_rkhs += _bilateralRegularization[i];
    }
    if(_regRD>0){
        if (params.kernel.useWeightImage())
            hipLaunchKernelGGL(( regularizerRD<true, false, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        else if (params.kernel.useWeightTensor())
            hipLaunchKernelGGL(( regularizerRD<false, true, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        else
            hipLaunchKernelGGL(( regularizerRD<false, false, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        cudaCheck(hipMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
            reg_value_rd += _bilateralRegularization[i];
    }
    // Guard made consistent with the _regPG>0 checks used for the derivative
    // upload above and the fetch below (previously any nonzero _regPG launched
    // the kernel, even when the surrounding transfers were skipped).
    if(_regPG > 0){
        if (params.kernel.useWeightImage())
            hipLaunchKernelGGL(( regularizerPG<true, false, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        else if (params.kernel.useWeightTensor())
            hipLaunchKernelGGL(( regularizerPG<false, true, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        else
            hipLaunchKernelGGL(( regularizerPG<false, false, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
        cudaCheck(hipMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
            reg_value_pg += _bilateralRegularization[i];
    }
    // fetch current derivative which has been updated by the bilateral regularizers
    if(_regRKHS>0 || _regRD>0 || _regPG>0){
        cudaCheck(hipMemcpy(derivatives, _deviceDerivatives.get(), _numberOfParameters*sizeof(ScalarType), hipMemcpyDeviceToHost), __FILE__, __LINE__);
    }
    return EvaluationResult{measure, reg_value_rkhs, reg_value_rd, reg_value_pg, pixelsCounted};
}
// Evaluates the dense displacement field induced by the given control points
// (and optional precomputed displacement field) and copies the per-voxel result
// into the host buffer dst (one VecF per fixed-image voxel).
void GpuKernelEvaluator::evaluateDisplacementField(const VecF *cpData, const ScalarType *cpwData, ImageNearest<VecF, SpaceDimensions> displacementField, VecF *dst)
{
    // Upload the current control point values and weights.
    cudaCheck(hipMemcpy( _deviceCpImage.get(), cpData, _cpImage.size().product()*sizeof(VecF), hipMemcpyHostToDevice), __FILE__, __LINE__);
    cudaCheck(hipMemcpy(_deviceCpwImage.get(), cpwData, _cpwImage.size().product()*sizeof(ScalarType), hipMemcpyHostToDevice), __FILE__, __LINE__);
    if (displacementField.data() != _displacementFieldPtr) {
        // Displacement field changed since the last call: refresh the cached
        // device copy. A null data pointer means "no field"; the device buffer
        // is released and the image keeps a null data pointer in that case.
        _displacementFieldPtr = displacementField.data();
        _displacementField = displacementField;
        _deviceDisplacementField.reset();
        if (_displacementFieldPtr) {
            _deviceDisplacementField = allocCuda<VecF>(displacementField.size().product());
            cudaCheck(hipMemcpy(_deviceDisplacementField.get(), _displacementFieldPtr, displacementField.size().product()*sizeof(VecF), hipMemcpyHostToDevice), __FILE__, __LINE__);
        }
        _displacementField.assignData(_deviceDisplacementField.get());
    }
    // NOTE: aggregate initialization — the value order below must match the
    // declaration order of GpuParams. No subsampling here (subsample = 1,
    // cube offsets = nullptr); counters/derivative buffers are unused.
    GpuParams params {
        _cpImage.size(),
        _fixedImage.size(),
        1,
        1,
        nullptr,
        _kernel,
        _movingImage,
        _fixedImage,
        _cpImage,
        _cpwImage,
        _displacementField,
        _displacementField.data() != nullptr,
        _regRKHS,
        _regRD,
        _regPG,
        _regRDScaling,
        _regPGScaling,
        nullptr,                 // gradients slot unused here
        _deviceGradients.get(),  // displacement output reuses the gradients buffer
        nullptr,
        nullptr,
        nullptr
    };
#if SpaceDimensions == 3
    dim3 threadsPerBlock(4, 4, 4);
#else
    dim3 threadsPerBlock(16, 16);
#endif
    // Ceil-divide the fixed image extent by the block size.
    dim3 blocksPerGrid = dim3(
        (_fixedImage.size()[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
        (_fixedImage.size()[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
        (_fixedImage.size()[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
        1
#endif
    );
    // Dispatch the kernel variant matching the kernel's weighting mode.
    if (params.kernel.useWeightImage())
        hipLaunchKernelGGL(( evaluateDisplacement<true, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
    else if (params.kernel.useWeightTensor())
        hipLaunchKernelGGL(( evaluateDisplacement<false, true>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
    else
        hipLaunchKernelGGL(( evaluateDisplacement<false, false>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, params);
    cudaCheck(hipPeekAtLastError(), __FILE__, __LINE__);
    // Synchronize before reading the result back to the host.
    cudaCheck(hipDeviceSynchronize(), __FILE__, __LINE__);
    cudaCheck(hipMemcpy(dst, _deviceGradients.get(), _fixedImage.size().product()*sizeof(VecF), hipMemcpyDeviceToHost), __FILE__, __LINE__);
}
// Sets the RKHS regularizer weight and (re)allocates the shared regularization
// buffers. The host/device buffers are shared by the RKHS, RD and PG
// regularizers, so they must stay allocated as long as ANY of the three
// weights is positive — previously this setter freed them and reallocated
// only when its own weight was positive, leaving another active regularizer
// with null buffers.
void GpuKernelEvaluator::SetRegularizerRKHS(ScalarType weight){
    if (weight != _regRKHS) {
        _regRKHS = weight;
        _bilateralRegularization.reset();
        _deviceBilateralRegularization.reset();
        if (_regRKHS > 0 || _regRD > 0 || _regPG > 0) {
            _bilateralRegularization.reset(new ScalarType[_numberOfParameters/SpaceDimensions]);
            _deviceBilateralRegularization = allocCuda<ScalarType>(_numberOfParameters/SpaceDimensions);
        }
    }
}
// Sets the RD regularizer weight/scaling and (re)allocates the shared
// regularization buffers. The buffers are shared by the RKHS, RD and PG
// regularizers, so they must stay allocated as long as ANY of the three
// weights is positive — previously this setter freed them and reallocated
// only when its own weight was positive, leaving another active regularizer
// with null buffers. A non-positive scaling is warned about but, as before,
// still applied.
void GpuKernelEvaluator::SetRegularizerRD(ScalarType weight, ScalarType scaling){
    if (weight != _regRD) {
        _regRD = weight;
        _bilateralRegularization.reset();
        _deviceBilateralRegularization.reset();
        if (_regRKHS > 0 || _regRD > 0 || _regPG > 0) {
            _bilateralRegularization.reset(new ScalarType[_numberOfParameters/SpaceDimensions]);
            _deviceBilateralRegularization = allocCuda<ScalarType>(_numberOfParameters/SpaceDimensions);
        }
    }
    if(scaling<=0.0){
        std::cout << "Attention: scaling of regularizer must be strictly positive!" << std::endl;
    }
    _regRDScaling = scaling;
}
// Sets the PG regularizer weight/scaling and (re)allocates the shared
// regularization buffers. The buffers are shared by the RKHS, RD and PG
// regularizers, so they must stay allocated as long as ANY of the three
// weights is positive — previously this setter freed them and reallocated
// only when its own weight was positive, leaving another active regularizer
// with null buffers. A non-positive scaling is warned about but, as before,
// still applied.
void GpuKernelEvaluator::SetRegularizerPG(ScalarType weight, ScalarType scaling){
    if (weight != _regPG) {
        _regPG = weight;
        _bilateralRegularization.reset();
        _deviceBilateralRegularization.reset();
        if (_regRKHS > 0 || _regRD > 0 || _regPG > 0) {
            _bilateralRegularization.reset(new ScalarType[_numberOfParameters/SpaceDimensions]);
            _deviceBilateralRegularization = allocCuda<ScalarType>(_numberOfParameters/SpaceDimensions);
        }
    }
    if(scaling<=0.0){
        std::cout << "Attention: scaling of regularizer must be strictly positive!" << std::endl;
    }
    _regPGScaling = scaling;
}
* Copyright 2016 University of Basel, Medical Image Analysis Center
*
* Author: Benedikt Bitterli (benedikt.bitterli@unibas.ch)
* Christoph Jud (christoph.jud@unibas.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "GpuEvaluator.h"
#include "CudaUtils.h"
#include "GpuEvaluatorDisplacementField.cu"
#include "GpuEvaluatorMSE.cu"
#include "GpuEvaluatorNCC.cu"
#include "GpuEvaluatorLCC.cu"
#include "GpuEvaluatorRegularizers.cu"
GpuKernelEvaluator::GpuKernelEvaluator(int numParameters,
int subsample,
int subsampleNeighborhood,
Kernel kernel,
BsplineImage<ScalarType, SpaceDimensions> movingImage,
ImageNearest<ScalarType, SpaceDimensions> fixedImage,
ImageNearest<VecF, SpaceDimensions> cpImage,
ImageNearest<ScalarType, SpaceDimensions> cpwImage)
: _numberOfParameters(numParameters),
_subsample(subsample),
_subsampleNeighborhood(subsampleNeighborhood),
_regRKHS(0),
_regRD(0),
_regPG(0),
_regRDScaling(1.0),
_regPGScaling(1.0),
_subsampledSize(fixedImage.size()/_subsample),
_displacementFieldPtr(nullptr),
_diffs(new ScalarType[fixedImage.size().product()]),
_fvalues(new ScalarType[fixedImage.size().product()]),
_mvalues(new ScalarType[fixedImage.size().product()]),
// _ffvalues(new ScalarType[fixedImage.size().product()]),
// _mmvalues(new ScalarType[fixedImage.size().product()]),
// _fmvalues(new ScalarType[fixedImage.size().product()]),
_ccvalues(new ScalarType[fixedImage.size().product()]),
_derivativesF(new ScalarType[numParameters]),
_derivativesM(new ScalarType[numParameters]),
_numberOfPixelsCounted(new int[fixedImage.size().product()]),
_cubeOffsets(new VecI[_subsampledSize.product()]),
_deviceDerivatives(allocCuda<ScalarType>(numParameters)),
_deviceDerivativesF(allocCuda<ScalarType>(numParameters)),
_deviceDerivativesM(allocCuda<ScalarType>(numParameters)),
_deviceDiffs(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceFvalues(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceMvalues(allocCuda<ScalarType>(fixedImage.size().product())),
// _deviceFFvalues(allocCuda<ScalarType>(fixedImage.size().product())),
// _deviceMMvalues(allocCuda<ScalarType>(fixedImage.size().product())),
// _deviceFMvalues(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceCCvalues(allocCuda<ScalarType>(fixedImage.size().product())),
_deviceNumberOfPixelsCounted(allocCuda<int>(fixedImage.size().product())),
_deviceGradients(allocCuda<VecF>(fixedImage.size().product())),
_deviceMovingImage(allocCuda<ScalarType>(movingImage.size().product(), movingImage.coeffs())),
_deviceFixedImage(allocCuda<ScalarType>(fixedImage.size().product(), fixedImage.data())),
_deviceCpImage(allocCuda<VecF>(cpImage.size().product())),
_deviceCpwImage(allocCuda<ScalarType>(cpwImage.size().product())),
_deviceWImage(allocCuda<ScalarType>(kernel.wImage().size().product(), kernel.wImage().data())),
_deviceWTensor(allocCuda<MatF>(kernel.wTensor().size().product(), kernel.wTensor().data())),
_deviceCubeOffsets(_subsample > 1 ? allocCuda<VecI>(_subsampledSize.product()) : nullptr),
_kernel(kernel),
_movingImage(movingImage),
_fixedImage(fixedImage),
_cpImage(cpImage),
_cpwImage(cpwImage),
is_evaluated_once(false)
{
_movingImage.assignCoeffs(_deviceMovingImage.get());
_fixedImage.assignData(_deviceFixedImage.get());
_cpImage.assignData(_deviceCpImage.get());
_cpwImage.assignData(_deviceCpwImage.get());
_kernel.wImage().assignData(_deviceWImage.get());
_kernel.wTensor().assignData(_deviceWTensor.get());
}
GpuKernelEvaluator::EvaluationResult
GpuKernelEvaluator::getValue(MEASURE metric,
const VecF *cpData,
const ScalarType *cpwData,
ImageNearest<VecF, SpaceDimensions> displacementField,
bool do_resampling)
{
VecI fixedSize = _fixedImage.size();
VecI paramSize = _cpImage.size();
cudaCheck(cudaMemcpy( _deviceCpImage.get(), cpData, _cpImage.size().product()*sizeof(VecF), cudaMemcpyHostToDevice), __FILE__, __LINE__);
cudaCheck(cudaMemcpy(_deviceCpwImage.get(), cpwData, _cpwImage.size().product()*sizeof(ScalarType), cudaMemcpyHostToDevice), __FILE__, __LINE__);
if (_subsample > 1) {
int numTexels = _subsampledSize.product();
if(!is_evaluated_once || do_resampling){
for (int i = 0; i < numTexels; ++i)
_cubeOffsets[i] = VecI(_rng.nextV<SpaceDimensions>()*static_cast<ScalarType>(_subsample));
is_evaluated_once = true;
}
cudaCheck(cudaMemcpy(_deviceCubeOffsets.get(), _cubeOffsets.get(), _subsampledSize.product()*sizeof(VecI), cudaMemcpyHostToDevice), __FILE__, __LINE__);
}
if (displacementField.data() != _displacementFieldPtr) {
_displacementFieldPtr = displacementField.data();
_displacementField = displacementField;
_deviceDisplacementField = allocCuda<VecF>(displacementField.size().product());
cudaCheck(cudaMemcpy(_deviceDisplacementField.get(), _displacementFieldPtr, displacementField.size().product()*sizeof(VecF), cudaMemcpyHostToDevice), __FILE__, __LINE__);
_displacementField.assignData(_deviceDisplacementField.get());
}
GpuParams params {
paramSize,
_subsampledSize,
_subsample,
_subsampleNeighborhood,
_deviceCubeOffsets.get(),
_kernel,
_movingImage,
_fixedImage,
_cpImage,
_cpwImage,
_displacementField,
_displacementFieldPtr != nullptr,
_regRKHS,
_regRD,
_regPG,
_regRDScaling,
_regPGScaling,
_deviceGradients.get(),
nullptr, // displacements
_deviceNumberOfPixelsCounted.get(),
nullptr, // derivatives
_deviceBilateralRegularization.get()
};
//------------------------------------------------------------------------------------------------------
// calculating loss function values
#if SpaceDimensions == 3
dim3 threadsPerBlock(4, 4, 4);
#else
dim3 threadsPerBlock(16, 16);
#endif
dim3 blocksPerGrid = dim3(
(_subsampledSize[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
(_subsampledSize[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
(_subsampledSize[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
1
#endif
);
ScalarType measure = 0.0;
int pixelsCounted = 0;
if(metric == MEASURE::MSE){
GpuMSEParams mse_params{
_deviceDiffs.get(),
};
if (_subsample == 1){
resolveValueMSE<false>(blocksPerGrid, threadsPerBlock, params, mse_params);
}
else{
resolveValueMSE<true> (blocksPerGrid, threadsPerBlock, params, mse_params);
}
cudaCheck(cudaMemcpy(_diffs.get(), _deviceDiffs.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
ScalarType mse = 0.0;
pixelsCounted = 0;
for (int i = 0; i < _subsampledSize.product(); ++i) {
mse += _diffs[i]*_diffs[i]; // TODO: could be done with only fvalues and mvalues. diffs is not needed.
pixelsCounted += _numberOfPixelsCounted[i];
}
mse /= pixelsCounted; // pixelsCounted should be checked to zero
measure = mse;
}
else if(metric == MEASURE::NCC){
GpuNCCParams ncc_params{
0, // mean_mf;
0, // mean_mm;
_deviceFvalues.get(),
_deviceMvalues.get(),
// _deviceFFvalues.get(),
// _deviceMMvalues.get(),
// _deviceFMvalues.get(),
nullptr,
nullptr
};
if (_subsample == 1){
resolveValueNCC<false>(blocksPerGrid, threadsPerBlock, params, ncc_params);
}
else{
resolveValueNCC<true> (blocksPerGrid, threadsPerBlock, params, ncc_params);
}
cudaCheck(cudaMemcpy(_fvalues.get(), _deviceFvalues.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaMemcpy(_mvalues.get(), _deviceMvalues.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
// cudaCheck(cudaMemcpy(_ffvalues.get(), _deviceFFvalues.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
// cudaCheck(cudaMemcpy(_mmvalues.get(), _deviceMMvalues.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
// cudaCheck(cudaMemcpy(_fmvalues.get(), _deviceFMvalues.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
ScalarType denom = 1.0;
ScalarType smm = 0.0;
ScalarType sfm = 0.0;
ScalarType sf = 0.0;
ScalarType sm = 0.0;
ScalarType sff = 0.0;
smm = 0.0;
sfm = 0.0;
pixelsCounted = 0;
for (int i = 0; i < _subsampledSize.product(); ++i) {
// sff += _ffvalues[i];
// smm += _mmvalues[i];
// sfm += _fmvalues[i];
sff += _fvalues[i]*_fvalues[i];
smm += _mvalues[i]*_mvalues[i];
sfm += _fvalues[i]*_mvalues[i];
sf += _fvalues[i];
sm += _mvalues[i];
pixelsCounted += _numberOfPixelsCounted[i];
}
// subtract mean
sff -= (sf*sf/pixelsCounted);
smm -= (sm*sm/pixelsCounted);
sfm -= (sf*sm/pixelsCounted);
denom = -1.0 * std::sqrt(sff*smm);
ScalarType ncc = 0.0;
if(denom!=0)
ncc = sfm/denom;
measure = ncc;
}
else if(metric == MEASURE::LCC){
GpuLCCParams lcc_params{
_deviceCCvalues.get(),
};
if (_subsample == 1){
#ifndef EXEC_SINGLE_THREAD
resolveValueLCC<false>(blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
resolveValueLCC<false>(1, 1, params, lcc_params);
#endif
}
else{
#ifndef EXEC_SINGLE_THREAD
resolveValueLCC<true> (blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
resolveValueLCC<true> (1, 1, params, lcc_params);
#endif
}
cudaCheck(cudaMemcpy( _ccvalues.get(), _deviceCCvalues .get(), fixedSize.product()*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
ScalarType lcc = 0.0;
pixelsCounted = 0;
for (int i = 0; i < _subsampledSize.product(); ++i) {
// ScalarType v = _ccvalues[i];
// if(std::isfinite(v))
// lcc += v;
lcc += _ccvalues[i];
pixelsCounted += _numberOfPixelsCounted[i];
}
lcc /= pixelsCounted; // pixelsCounted should be checked to zero
measure = lcc;
}
//------------------------------------------------------------------------------------------------------
// calculating bilateral regularizer
// copy current derivatives to gpu
ScalarType reg_value_rkhs = 0;
ScalarType reg_value_rd = 0;
ScalarType reg_value_pg = 0;
/** RKHS norm: c'Kc */
if(_regRKHS>0){
if (params.kernel.useWeightImage())
regularizerRKHS<true, false, false><<<blocksPerGrid, threadsPerBlock>>>(params);
else if (params.kernel.useWeightTensor())
regularizerRKHS<false, true, false><<<blocksPerGrid, threadsPerBlock>>>(params);
else
regularizerRKHS<false, false, false><<<blocksPerGrid, threadsPerBlock>>>(params);
cudaCheck(cudaMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
reg_value_rkhs += _bilateralRegularization[i];
}
if(_regRD>0){
if (params.kernel.useWeightImage())
regularizerRD<true, false, false><<<blocksPerGrid, threadsPerBlock>>>(params);
else if (params.kernel.useWeightTensor())
regularizerRD<false, true, false><<<blocksPerGrid, threadsPerBlock>>>(params);
else
regularizerRD<false, false, false><<<blocksPerGrid, threadsPerBlock>>>(params);
cudaCheck(cudaMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
reg_value_rd += _bilateralRegularization[i];
}
if(_regPG){
if (params.kernel.useWeightImage())
regularizerPG<true, false, false><<<blocksPerGrid, threadsPerBlock>>>(params);
else if (params.kernel.useWeightTensor())
regularizerPG<false, true, false><<<blocksPerGrid, threadsPerBlock>>>(params);
else
regularizerPG<false, false, false><<<blocksPerGrid, threadsPerBlock>>>(params);
cudaCheck(cudaMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
reg_value_pg += _bilateralRegularization[i];
}
return EvaluationResult{measure, reg_value_rkhs, reg_value_rd, reg_value_pg, pixelsCounted};
}
// TODO
// Computes the similarity measure (MSE / NCC / LCC), its derivative with
// respect to the control-point parameters, and the bilateral regularizer
// terms in one GPU pass.
//
//   metric            similarity measure to evaluate
//   cpData            host control-point displacements (uploaded here)
//   cpwData           host control-point weights (uploaded here)
//   displacementField optional precomputed field; re-uploaded only when its
//                     data pointer changed since the previous call
//   derivatives       out: _numberOfParameters derivative values (host side)
//   do_resampling     when subsampling, draw fresh random cube offsets
//
// Returns the measure, the three regularizer values and the number of pixels
// that contributed to the measure.
GpuKernelEvaluator::EvaluationResult
GpuKernelEvaluator::getValueAndDerivative(MEASURE metric,
                                          const VecF *cpData,
                                          const ScalarType *cpwData,
                                          ImageNearest<VecF, SpaceDimensions> displacementField,
                                          ScalarType *derivatives,
                                          bool do_resampling)
{
    VecI fixedSize = _fixedImage.size();
    VecI paramSize = _cpImage.size();
    // upload current control-point parameters and weights
    cudaCheck(cudaMemcpy( _deviceCpImage.get(), cpData, _cpImage.size().product()*sizeof(VecF), cudaMemcpyHostToDevice), __FILE__, __LINE__);
    cudaCheck(cudaMemcpy(_deviceCpwImage.get(), cpwData, _cpwImage.size().product()*sizeof(ScalarType), cudaMemcpyHostToDevice), __FILE__, __LINE__);
    if (_subsample > 1) {
        // stochastic subsampling: one random offset per sampling cube
        int numTexels = _subsampledSize.product();
        if(!is_evaluated_once || do_resampling){
            for (int i = 0; i < numTexels; ++i)
                _cubeOffsets[i] = VecI(_rng.nextV<SpaceDimensions>()*static_cast<ScalarType>(_subsample));
            is_evaluated_once = true;
        }
        cudaCheck(cudaMemcpy(_deviceCubeOffsets.get(), _cubeOffsets.get(), _subsampledSize.product()*sizeof(VecI), cudaMemcpyHostToDevice), __FILE__, __LINE__);
    }
    if (displacementField.data() != _displacementFieldPtr) {
        _displacementFieldPtr = displacementField.data();
        _displacementField = displacementField;
        // Release the previous device copy first and only upload when a field
        // is actually provided. Fix: the old code unconditionally allocated
        // and copied from the (possibly null) host pointer; this now matches
        // evaluateDisplacementField().
        _deviceDisplacementField.reset();
        if (_displacementFieldPtr) {
            _deviceDisplacementField = allocCuda<VecF>(displacementField.size().product());
            cudaCheck(cudaMemcpy(_deviceDisplacementField.get(), _displacementFieldPtr, displacementField.size().product()*sizeof(VecF), cudaMemcpyHostToDevice), __FILE__, __LINE__);
        }
        _displacementField.assignData(_deviceDisplacementField.get());
    }
    GpuParams params {
        paramSize,
        _subsampledSize,
        _subsample,
        _subsampleNeighborhood,
        _deviceCubeOffsets.get(),
        _kernel,
        _movingImage,
        _fixedImage,
        _cpImage,
        _cpwImage,
        _displacementField,
        _displacementFieldPtr != nullptr,
        _regRKHS,
        _regRD,
        _regPG,
        _regRDScaling,
        _regPGScaling,
        _deviceGradients.get(),
        nullptr,
        _deviceNumberOfPixelsCounted.get(),
        _deviceDerivatives.get(),
        _deviceBilateralRegularization.get()
    };
    //------------------------------------------------------------------------------------------------------
    // calculating loss function values: one thread per (subsampled) fixed-image voxel
#if SpaceDimensions == 3
    dim3 threadsPerBlock(4, 4, 4);
#else
    dim3 threadsPerBlock(16, 16);
#endif
    dim3 blocksPerGrid = dim3(
        (_subsampledSize[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
        (_subsampledSize[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
        (_subsampledSize[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
        1
#endif
    );
    ScalarType measure = 0.0;
    int pixelsCounted = 0;
    // NCC bookkeeping shared between the value pass and the derivative pass below
    ScalarType denom = 1.0;
    ScalarType smm = 0.0;
    ScalarType sfm = 0.0;
    ScalarType mean_mf = 0.0;
    ScalarType mean_mm = 0.0;
    if(metric == MEASURE::MSE){
        GpuMSEParams mse_params{
            _deviceDiffs.get(),
        };
        if (_subsample == 1){
            resolveValueMSE<false>(blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        else{
            resolveValueMSE<true> (blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        cudaCheck(cudaMemcpy(_diffs.get(), _deviceDiffs.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        ScalarType mse = 0.0;
        pixelsCounted = 0;
        for (int i = 0; i < _subsampledSize.product(); ++i) {
            mse += _diffs[i]*_diffs[i]; // TODO: could be done with only fvalues and mvalues. diffs is not needed.
            pixelsCounted += _numberOfPixelsCounted[i];
        }
        mse /= pixelsCounted; // pixelsCounted should be checked to zero
        measure = mse;
    }
    else if(metric == MEASURE::NCC){
        GpuNCCParams ncc_params{
            0, // mean_mf;
            0, // mean_mm;
            _deviceFvalues.get(),
            _deviceMvalues.get(),
            _deviceDerivativesF.get(),
            _deviceDerivativesM.get()
        };
        if (_subsample == 1){
            resolveValueNCC<false>(blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        else{
            resolveValueNCC<true>(blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        cudaCheck(cudaMemcpy(_fvalues.get(), _deviceFvalues.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaMemcpy(_mvalues.get(), _deviceMvalues.get(), fixedSize.product()*sizeof( ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        // accumulate the NCC sufficient statistics on the host
        ScalarType sf = 0.0;
        ScalarType sm = 0.0;
        ScalarType sff = 0.0;
        smm = 0.0;
        sfm = 0.0;
        pixelsCounted = 0;
        for (int i = 0; i < _subsampledSize.product(); ++i) {
            sff += _fvalues[i]*_fvalues[i];
            smm += _mvalues[i]*_mvalues[i];
            sfm += _fvalues[i]*_mvalues[i];
            sf += _fvalues[i];
            sm += _mvalues[i];
            pixelsCounted += _numberOfPixelsCounted[i];
        }
        // subtract mean (centered second moments)
        sff -= (sf*sf/pixelsCounted);
        smm -= (sm*sm/pixelsCounted);
        sfm -= (sf*sm/pixelsCounted);
        // negative sign: the optimizer minimizes, so a good correlation is negative
        denom = -1.0 * std::sqrt(sff*smm);
        ScalarType ncc = 0.0;
        if(denom!=0)
            ncc = sfm/denom;
        measure = ncc;
        // save mean f and m values in GPU parameter struct
        // since they are needed in calculating the derivative
        mean_mf = sf/pixelsCounted;
        mean_mm = sm/pixelsCounted;
    }
    else if(metric == MEASURE::LCC){
        GpuLCCParams lcc_params{
            _deviceCCvalues.get(),
        };
        if (_subsample == 1){
#ifndef EXEC_SINGLE_THREAD
            resolveValueLCC<false>(blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
            resolveValueLCC<false>(1, 1, params, lcc_params);
#endif
        }
        else{
#ifndef EXEC_SINGLE_THREAD
            resolveValueLCC<true>(blocksPerGrid, threadsPerBlock, params, lcc_params);
#else
            resolveValueLCC<true>(1, 1, params, lcc_params);
#endif
        }
        cudaCheck(cudaMemcpy( _ccvalues.get(), _deviceCCvalues .get(), fixedSize.product()*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaMemcpy(_numberOfPixelsCounted.get(), _deviceNumberOfPixelsCounted.get(), fixedSize.product()*sizeof( int), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        ScalarType lcc = 0.0;
        pixelsCounted = 0;
        for (int i = 0; i < _subsampledSize.product(); ++i) {
            lcc += _ccvalues[i];
            pixelsCounted += _numberOfPixelsCounted[i];
        }
        lcc /= pixelsCounted; // pixelsCounted should be checked to zero
        measure = lcc;
    }
    //------------------------------------------------------------------------------------------------------
    // calculating derivatives: one thread per control point
    cudaCheck(cudaMemset(_deviceDerivatives.get(), 0, _numberOfParameters*sizeof(ScalarType)), __FILE__, __LINE__);
    blocksPerGrid = dim3(
        (paramSize[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
        (paramSize[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
        (paramSize[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
        1
#endif
    );
    if(metric == MEASURE::MSE){
        GpuMSEParams mse_params{
            _deviceDiffs.get(),
        };
        if (_subsample == 1){
            resolveDerivativeMSE<false>(blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        else{
            resolveDerivativeMSE<true> (blocksPerGrid, threadsPerBlock, params, mse_params);
        }
        cudaCheck(cudaMemcpy(derivatives, _deviceDerivatives.get(), _numberOfParameters*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters; ++i)
            derivatives[i] /= pixelsCounted; // pixelsCounted should be checked to zero
    }
    else if(metric == MEASURE::NCC){
        // reuse the means computed in the value pass
        GpuNCCParams ncc_params{
            mean_mf,
            mean_mm,
            _deviceFvalues.get(),
            _deviceMvalues.get(),
            _deviceDerivativesF.get(),
            _deviceDerivativesM.get()
        };
        if (_subsample == 1){
            resolveDerivativeNCC<false>(blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        else{
            resolveDerivativeNCC<true> (blocksPerGrid, threadsPerBlock, params, ncc_params);
        }
        cudaCheck(cudaMemcpy(_derivativesF.get(), _deviceDerivativesF.get(), _numberOfParameters*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaMemcpy(_derivativesM.get(), _deviceDerivativesM.get(), _numberOfParameters*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        // chain rule for centered NCC; smm==0 implies denom==0, so the
        // division by smm is guarded by the denom check
        for (int i = 0; i < _numberOfParameters; ++i){
            if(denom!=0)
                derivatives[i] = ( _derivativesF[i] - (sfm/smm)*_derivativesM[i] ) / denom;
            else
                derivatives[i] = 0.0;
        }
    }
    else if(metric == MEASURE::LCC){
        if (_subsample == 1){
            resolveDerivativeLCC<false>(blocksPerGrid, threadsPerBlock, params);
        }
        else{
            resolveDerivativeLCC<true> (blocksPerGrid, threadsPerBlock, params);
        }
        cudaCheck(cudaMemcpy(derivatives, _deviceDerivatives.get(), _numberOfParameters*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters; ++i)
            derivatives[i] /= pixelsCounted; // pixelsCounted should be checked to zero
    }
    //------------------------------------------------------------------------------------------------------
    // calculating bilateral regularizer
    // copy current derivatives to gpu (the regularizer kernels update them in place)
    if(_regRKHS>0 || _regRD>0 || _regPG>0){
        cudaCheck(cudaMemcpy(_deviceDerivatives.get(), derivatives, _numberOfParameters*sizeof(ScalarType), cudaMemcpyHostToDevice), __FILE__, __LINE__);
    }
    ScalarType reg_value_rkhs = 0;
    ScalarType reg_value_rd = 0;
    ScalarType reg_value_pg = 0;
    /** RKHS norm: c'Kc */
    if(_regRKHS>0){
        if (params.kernel.useWeightImage())
            regularizerRKHS<true, false, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        else if (params.kernel.useWeightTensor())
            regularizerRKHS<false, true, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        else
            regularizerRKHS<false, false, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        cudaCheck(cudaMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
            reg_value_rkhs += _bilateralRegularization[i];
    }
    if(_regRD>0){
        if (params.kernel.useWeightImage())
            regularizerRD<true, false, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        else if (params.kernel.useWeightTensor())
            regularizerRD<false, true, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        else
            regularizerRD<false, false, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        cudaCheck(cudaMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
            reg_value_rd += _bilateralRegularization[i];
    }
    if(_regPG){
        if (params.kernel.useWeightImage())
            regularizerPG<true, false, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        else if (params.kernel.useWeightTensor())
            regularizerPG<false, true, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        else
            regularizerPG<false, false, true><<<blocksPerGrid, threadsPerBlock>>>(params);
        cudaCheck(cudaMemcpy(_bilateralRegularization.get(), _deviceBilateralRegularization.get(), _numberOfParameters/SpaceDimensions*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
        cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
        for (int i = 0; i < _numberOfParameters/SpaceDimensions; ++i)
            reg_value_pg += _bilateralRegularization[i];
    }
    // fetch current derivative which has been updated by the bilateral regularizers
    if(_regRKHS>0 || _regRD>0 || _regPG>0){
        cudaCheck(cudaMemcpy(derivatives, _deviceDerivatives.get(), _numberOfParameters*sizeof(ScalarType), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
    }
    return EvaluationResult{measure, reg_value_rkhs, reg_value_rd, reg_value_pg, pixelsCounted};
}
// Evaluates the dense displacement field induced by the current control-point
// configuration and writes one VecF per fixed-image voxel into dst (host).
// cpData/cpwData are uploaded to the GPU every call; displacementField is
// re-uploaded only when its data pointer changed since the previous call, and
// may be null (the kernel then runs without a precomputed field).
void GpuKernelEvaluator::evaluateDisplacementField(const VecF *cpData, const ScalarType *cpwData, ImageNearest<VecF, SpaceDimensions> displacementField, VecF *dst)
{
// upload current control-point displacements and weights
cudaCheck(cudaMemcpy( _deviceCpImage.get(), cpData, _cpImage.size().product()*sizeof(VecF), cudaMemcpyHostToDevice), __FILE__, __LINE__);
cudaCheck(cudaMemcpy(_deviceCpwImage.get(), cpwData, _cpwImage.size().product()*sizeof(ScalarType), cudaMemcpyHostToDevice), __FILE__, __LINE__);
if (displacementField.data() != _displacementFieldPtr) {
// the field changed: drop the cached device copy and re-upload if non-null
_displacementFieldPtr = displacementField.data();
_displacementField = displacementField;
_deviceDisplacementField.reset();
if (_displacementFieldPtr) {
_deviceDisplacementField = allocCuda<VecF>(displacementField.size().product());
cudaCheck(cudaMemcpy(_deviceDisplacementField.get(), _displacementFieldPtr, displacementField.size().product()*sizeof(VecF), cudaMemcpyHostToDevice), __FILE__, __LINE__);
}
_displacementField.assignData(_deviceDisplacementField.get());
}
// NOTE(review): compared with getValueAndDerivative(), the gradient buffer is
// passed in a different aggregate slot here (nullptr first, then
// _deviceGradients) — presumably GpuParams has separate output slots for the
// value pass and the field evaluation; verify against the GpuParams definition.
GpuParams params {
_cpImage.size(),
_fixedImage.size(),
1,
1,
nullptr,
_kernel,
_movingImage,
_fixedImage,
_cpImage,
_cpwImage,
_displacementField,
_displacementField.data() != nullptr,
_regRKHS,
_regRD,
_regPG,
_regRDScaling,
_regPGScaling,
nullptr,
_deviceGradients.get(),
nullptr,
nullptr,
nullptr
};
// one thread per fixed-image voxel
#if SpaceDimensions == 3
dim3 threadsPerBlock(4, 4, 4);
#else
dim3 threadsPerBlock(16, 16);
#endif
dim3 blocksPerGrid = dim3(
(_fixedImage.size()[0] + threadsPerBlock.x - 1)/threadsPerBlock.x,
(_fixedImage.size()[1] + threadsPerBlock.y - 1)/threadsPerBlock.y,
#if SpaceDimensions == 3
(_fixedImage.size()[2] + threadsPerBlock.z - 1)/threadsPerBlock.z
#else
1
#endif
);
// dispatch on the kernel weighting mode (compile-time template flags)
if (params.kernel.useWeightImage())
evaluateDisplacement<true, false><<<blocksPerGrid, threadsPerBlock>>>(params);
else if (params.kernel.useWeightTensor())
evaluateDisplacement<false, true><<<blocksPerGrid, threadsPerBlock>>>(params);
else
evaluateDisplacement<false, false><<<blocksPerGrid, threadsPerBlock>>>(params);
cudaCheck(cudaPeekAtLastError(), __FILE__, __LINE__);
cudaCheck(cudaDeviceSynchronize(), __FILE__, __LINE__);
// copy the evaluated field back to the caller's host buffer
cudaCheck(cudaMemcpy(dst, _deviceGradients.get(), _fixedImage.size().product()*sizeof(VecF), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
}
// Sets the RKHS regularizer weight and (re)allocates the host/device
// accumulation buffers used by the bilateral regularizers.
void GpuKernelEvaluator::SetRegularizerRKHS(ScalarType weight){
    if (weight != _regRKHS) {
        _regRKHS = weight;
        _bilateralRegularization.reset();
        _deviceBilateralRegularization.reset();
        // The accumulation buffers are shared by the RKHS, RD and PG
        // regularizers, so they must stay allocated while ANY of the three is
        // active. Fix: previously disabling RKHS destroyed buffers that were
        // still needed by an active RD/PG regularizer.
        if (_regRKHS > 0 || _regRD > 0 || _regPG > 0) {
            _bilateralRegularization.reset(new ScalarType[_numberOfParameters/SpaceDimensions]);
            _deviceBilateralRegularization = allocCuda<ScalarType>(_numberOfParameters/SpaceDimensions);
        }
    }
}
// Sets the RD regularizer weight and scaling; (re)allocates the shared
// regularizer accumulation buffers when needed. The scaling is stored even
// when non-positive (a warning is printed), matching previous behavior.
void GpuKernelEvaluator::SetRegularizerRD(ScalarType weight, ScalarType scaling){
    if (weight != _regRD) {
        _regRD = weight;
        _bilateralRegularization.reset();
        _deviceBilateralRegularization.reset();
        // Buffers are shared with the RKHS and PG regularizers, so keep them
        // allocated while ANY regularizer is active. Fix: previously disabling
        // RD destroyed buffers still needed by an active RKHS/PG regularizer.
        if (_regRKHS > 0 || _regRD > 0 || _regPG > 0) {
            _bilateralRegularization.reset(new ScalarType[_numberOfParameters/SpaceDimensions]);
            _deviceBilateralRegularization = allocCuda<ScalarType>(_numberOfParameters/SpaceDimensions);
        }
    }
    if(scaling<=0.0){
        std::cout << "Attention: scaling of regularizer must be strictly positive!" << std::endl;
    }
    _regRDScaling = scaling;
}
// Sets the PG regularizer weight and scaling; (re)allocates the shared
// regularizer accumulation buffers when needed. The scaling is stored even
// when non-positive (a warning is printed), matching previous behavior.
void GpuKernelEvaluator::SetRegularizerPG(ScalarType weight, ScalarType scaling){
    if (weight != _regPG) {
        _regPG = weight;
        _bilateralRegularization.reset();
        _deviceBilateralRegularization.reset();
        // Buffers are shared with the RKHS and RD regularizers, so keep them
        // allocated while ANY regularizer is active. Fix: previously disabling
        // PG destroyed buffers still needed by an active RKHS/RD regularizer.
        if (_regRKHS > 0 || _regRD > 0 || _regPG > 0) {
            _bilateralRegularization.reset(new ScalarType[_numberOfParameters/SpaceDimensions]);
            _deviceBilateralRegularization = allocCuda<ScalarType>(_numberOfParameters/SpaceDimensions);
        }
    }
    if(scaling<=0.0){
        std::cout << "Attention: scaling of regularizer must be strictly positive!" << std::endl;
    }
    _regPGScaling = scaling;
}
00834208d13ef354783b70a87b18fe182c1469e1.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 192
#define N 96
#define H 28
#define W 28
#define R 3
#define S 3
using namespace std;
// Evaluates a cuDNN call and aborts the process with the failing line number
// and cuDNN error string when it does not return CUDNN_STATUS_SUCCESS.
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Terminates the process immediately when a HIP runtime call reports an error.
inline void chkerr(hipError_t code)
{
    if (code == hipSuccess)
        return;
    std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
    exit(-1);
}
// Runs a single-image cuDNN convolution using the IMPLICIT_GEMM algorithm.
// initialize() must be called once before forward().
class ConvGemm{
public:
float *cpuKernel;            // host-side staging buffer for the filter weights
float alpha = 1.0f;          // cudnnConvolutionForward blend: out = alpha*conv + beta*out
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};  // scratch space required by the chosen algorithm
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;               // device buffer for the result (N*H*W floats)
float *kernel;               // device buffer for the filter weights
void initialize();
float *forward(float *input);
};
// Allocates device buffers, builds all cuDNN descriptors for a padded 3x3
// convolution (pad=1, stride=1, so H/W are preserved), sizes the workspace
// for the IMPLICIT_GEMM algorithm and uploads an all-ones filter.
// All runtime/cuDNN calls are now checked (fix: previously unchecked).
void ConvGemm::initialize(){
    chkerr(hipMalloc(&kernel,sizeof(float)*C*N*R*S));
    chkerr(hipMalloc(&this->output,sizeof(float)*N*H*W));
    checkCUDNN(cudnnCreate(&convCudnn));
    checkCUDNN(cudnnCreateTensorDescriptor(&convInputDescriptor));
    checkCUDNN(cudnnSetTensor4dDescriptor(convInputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/C,
                               /*image_height=*/H,
                               /*image_width=*/W));
    checkCUDNN(cudnnCreateFilterDescriptor(&convKernelDescriptor));
    checkCUDNN(cudnnSetFilter4dDescriptor(convKernelDescriptor,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*out_channels=*/N,
                               /*in_channels=*/C,
                               /*kernel_height=*/R,
                               /*kernel_width=*/S));
    checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
    checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc,
                                    /*pad_height=*/1,
                                    /*pad_width=*/1,
                                    /*vertical_stride=*/1,
                                    /*horizontal_stride=*/1,
                                    /*dilation_height=*/1,
                                    /*dilation_width=*/1,
                                    /*mode=*/CUDNN_CROSS_CORRELATION,
                                    CUDNN_DATA_FLOAT));
    // query the output shape implied by the descriptors; with the parameters
    // above it equals 1 x N x H x W (which is what `output` was sized for)
    int batch_size{0}, channels{0}, height{0}, width{0};
    checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
                                          convInputDescriptor,
                                          convKernelDescriptor,
                                          &batch_size,
                                          &channels,
                                          &height,
                                          &width));
    checkCUDNN(cudnnCreateTensorDescriptor(&convOutputDescriptor));
    checkCUDNN(cudnnSetTensor4dDescriptor(convOutputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/N,
                               /*image_height=*/H,
                               /*image_width=*/W));
    // workspace for the algorithm used in forward()
    checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
                                            convInputDescriptor,
                                            convKernelDescriptor,
                                            convDesc,
                                            convOutputDescriptor,
                                            CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
                                            &workspace_bytes));
    chkerr(hipMalloc(&d_workspace, workspace_bytes));
    // fill the filter with ones on the host and upload it
    unsigned int kernelSize = R*S*C*N;//kernel
    this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
    for(unsigned int i=0;i<kernelSize;++i){
        this->cpuKernel[i] = 1.0f;
    }
    chkerr(hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice));
    free(cpuKernel);
    this->cpuKernel = nullptr; // avoid a dangling pointer after free
}
// Runs the convolution on a device input tensor (1 x C x H x W floats) and
// returns the internal device output buffer (1 x N x H x W floats).
// The output is cleared first; hipMemset is now checked (fix: was unchecked).
float * ConvGemm::forward(float *input) {
    chkerr(hipMemset(output, 0, 1*N*H*W*sizeof(float)));
    checkCUDNN(cudnnConvolutionForward(convCudnn,
                                       &alpha,
                                       convInputDescriptor,
                                       input,
                                       convKernelDescriptor,
                                       kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
                                       d_workspace,
                                       workspace_bytes,
                                       &beta,
                                       convOutputDescriptor,
                                       output));
    return output;
}
// Runs a single-image cuDNN convolution using the WINOGRAD_NONFUSED algorithm.
// initialize() must be called once before forward().
class ConvWinogradeNon{
public:
float *cpuKernel;            // host-side staging buffer for the filter weights
float alpha = 1.0f;          // cudnnConvolutionForward blend: out = alpha*conv + beta*out
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};  // scratch space required by the chosen algorithm
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;               // device buffer for the result (N*H*W floats)
float *kernel;               // device buffer for the filter weights
void initialize();
float *forward(float *input);
};
// Allocates device buffers, builds all cuDNN descriptors for a padded 3x3
// convolution (pad=1, stride=1, so H/W are preserved), sizes the workspace
// for the WINOGRAD_NONFUSED algorithm and uploads an all-ones filter.
// All runtime/cuDNN calls are now checked (fix: previously unchecked).
void ConvWinogradeNon::initialize(){
    chkerr(hipMalloc(&kernel,sizeof(float)*C*N*R*S));
    chkerr(hipMalloc(&this->output,sizeof(float)*N*H*W));
    checkCUDNN(cudnnCreate(&convCudnn));
    checkCUDNN(cudnnCreateTensorDescriptor(&convInputDescriptor));
    checkCUDNN(cudnnSetTensor4dDescriptor(convInputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/C,
                               /*image_height=*/H,
                               /*image_width=*/W));
    checkCUDNN(cudnnCreateFilterDescriptor(&convKernelDescriptor));
    checkCUDNN(cudnnSetFilter4dDescriptor(convKernelDescriptor,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*out_channels=*/N,
                               /*in_channels=*/C,
                               /*kernel_height=*/R,
                               /*kernel_width=*/S));
    checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
    checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc,
                                    /*pad_height=*/1,
                                    /*pad_width=*/1,
                                    /*vertical_stride=*/1,
                                    /*horizontal_stride=*/1,
                                    /*dilation_height=*/1,
                                    /*dilation_width=*/1,
                                    /*mode=*/CUDNN_CROSS_CORRELATION,
                                    CUDNN_DATA_FLOAT));
    // query the output shape implied by the descriptors; with the parameters
    // above it equals 1 x N x H x W (which is what `output` was sized for)
    int batch_size{0}, channels{0}, height{0}, width{0};
    checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
                                          convInputDescriptor,
                                          convKernelDescriptor,
                                          &batch_size,
                                          &channels,
                                          &height,
                                          &width));
    checkCUDNN(cudnnCreateTensorDescriptor(&convOutputDescriptor));
    checkCUDNN(cudnnSetTensor4dDescriptor(convOutputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/N,
                               /*image_height=*/H,
                               /*image_width=*/W));
    // workspace for the algorithm used in forward()
    checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
                                            convInputDescriptor,
                                            convKernelDescriptor,
                                            convDesc,
                                            convOutputDescriptor,
                                            CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
                                            &workspace_bytes));
    chkerr(hipMalloc(&d_workspace, workspace_bytes));
    // fill the filter with ones on the host and upload it
    unsigned int kernelSize = R*S*C*N;//kernel
    this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
    for(unsigned int i=0;i<kernelSize;++i){
        this->cpuKernel[i] = 1.0f;
    }
    chkerr(hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice));
    free(cpuKernel);
    this->cpuKernel = nullptr; // avoid a dangling pointer after free
}
// Runs the convolution on a device input tensor (1 x C x H x W floats) and
// returns the internal device output buffer (1 x N x H x W floats).
// The output is cleared first; hipMemset is now checked (fix: was unchecked).
float * ConvWinogradeNon::forward(float *input) {
    chkerr(hipMemset(output, 0, 1*N*H*W*sizeof(float)));
    checkCUDNN(cudnnConvolutionForward(convCudnn,
                                       &alpha,
                                       convInputDescriptor,
                                       input,
                                       convKernelDescriptor,
                                       kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
                                       d_workspace,
                                       workspace_bytes,
                                       &beta,
                                       convOutputDescriptor,
                                       output));
    return output;
}
// Runs a single-image cuDNN convolution using the FFT algorithm.
// initialize() must be called once before forward().
class ConvFFT{
public:
float *cpuKernel;            // host-side staging buffer for the filter weights
float alpha = 1.0f;          // cudnnConvolutionForward blend: out = alpha*conv + beta*out
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};  // scratch space required by the chosen algorithm
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;               // device buffer for the result (N*H*W floats)
float *kernel;               // device buffer for the filter weights
void initialize();
float *forward(float *input);
};
// Allocates device buffers, builds all cuDNN descriptors for a padded 3x3
// convolution (pad=1, stride=1, so H/W are preserved), sizes the workspace
// for the FFT algorithm and uploads an all-ones filter.
// All runtime/cuDNN calls are now checked (fix: previously unchecked).
void ConvFFT::initialize(){
    chkerr(hipMalloc(&kernel,sizeof(float)*C*N*R*S));
    chkerr(hipMalloc(&this->output,sizeof(float)*N*H*W));
    checkCUDNN(cudnnCreate(&convCudnn));
    checkCUDNN(cudnnCreateTensorDescriptor(&convInputDescriptor));
    checkCUDNN(cudnnSetTensor4dDescriptor(convInputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/C,
                               /*image_height=*/H,
                               /*image_width=*/W));
    checkCUDNN(cudnnCreateFilterDescriptor(&convKernelDescriptor));
    checkCUDNN(cudnnSetFilter4dDescriptor(convKernelDescriptor,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*out_channels=*/N,
                               /*in_channels=*/C,
                               /*kernel_height=*/R,
                               /*kernel_width=*/S));
    checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
    checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc,
                                    /*pad_height=*/1,
                                    /*pad_width=*/1,
                                    /*vertical_stride=*/1,
                                    /*horizontal_stride=*/1,
                                    /*dilation_height=*/1,
                                    /*dilation_width=*/1,
                                    /*mode=*/CUDNN_CROSS_CORRELATION,
                                    CUDNN_DATA_FLOAT));
    // query the output shape implied by the descriptors; with the parameters
    // above it equals 1 x N x H x W (which is what `output` was sized for)
    int batch_size{0}, channels{0}, height{0}, width{0};
    checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc,
                                          convInputDescriptor,
                                          convKernelDescriptor,
                                          &batch_size,
                                          &channels,
                                          &height,
                                          &width));
    checkCUDNN(cudnnCreateTensorDescriptor(&convOutputDescriptor));
    checkCUDNN(cudnnSetTensor4dDescriptor(convOutputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/N,
                               /*image_height=*/H,
                               /*image_width=*/W));
    // workspace for the algorithm used in forward()
    checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
                                            convInputDescriptor,
                                            convKernelDescriptor,
                                            convDesc,
                                            convOutputDescriptor,
                                            CUDNN_CONVOLUTION_FWD_ALGO_FFT,
                                            &workspace_bytes));
    chkerr(hipMalloc(&d_workspace, workspace_bytes));
    // fill the filter with ones on the host and upload it
    unsigned int kernelSize = R*S*C*N;//kernel
    this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
    for(unsigned int i=0;i<kernelSize;++i){
        this->cpuKernel[i] = 1.0f;
    }
    chkerr(hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice));
    free(cpuKernel);
    this->cpuKernel = nullptr; // avoid a dangling pointer after free
}
// Runs the convolution on a device input tensor (1 x C x H x W floats) and
// returns the internal device output buffer (1 x N x H x W floats).
// The output is cleared first; hipMemset is now checked (fix: was unchecked).
float * ConvFFT::forward(float *input) {
    chkerr(hipMemset(output, 0, 1*N*H*W*sizeof(float)));
    checkCUDNN(cudnnConvolutionForward(convCudnn,
                                       &alpha,
                                       convInputDescriptor,
                                       input,
                                       convKernelDescriptor,
                                       kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_FFT,
                                       d_workspace,
                                       workspace_bytes,
                                       &beta,
                                       convOutputDescriptor,
                                       output));
    return output;
}
// Auto-generated (TVM-style) tiled 3x3 convolution kernel over the fixed
// problem size C=192, N=96, H=W=28 defined above. Each thread accumulates 8
// output channels for one output pixel; input tiles and filter slices are
// staged through shared memory in 4 chunks of 48 input channels.
// NOTE(review): this is machine-generated code — the index arithmetic assumes
// the exact launch configuration chosen by the generator; confirm against the
// generating schedule before editing. Kept byte-identical below.
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[4608];
__shared__ float kernel_shared[6912];
float pad_temp_shared_local[18];
float kernel_shared_local[144];
// zero the per-thread accumulators (8 output channels)
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 8; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
}
// outer loop over 4 chunks of input channels
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
__syncthreads();
// stage the zero-padded input tile for this channel chunk into shared memory
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 42; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 6)) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 96)) < 48) {
if ((((((int)threadIdx.z) * 144) + (((int)threadIdx.y) * 36)) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 4)) < 288) {
if (((((((int)threadIdx.z) * 2304) + (((int)threadIdx.y) * 576)) + (((int)threadIdx.x) * 42)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 4608) {
if ((((((int)threadIdx.y) * 576) + (((int)threadIdx.x) * 42)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2304) {
if (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 576) {
pad_temp_shared[(((((((int)threadIdx.z) * 2304) + (((int)threadIdx.y) * 576)) + (((int)threadIdx.x) * 42)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 96) >> 4))) && (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 96) >> 4)) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 15)) < 29)) ? data[((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 96) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 96) >> 4) * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 15)) - 29))] : 0.000000e+00f);
}
}
}
}
}
}
// stage the filter weights for this channel chunk into shared memory
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 62; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 432)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 9)) < 768) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 3)) < 2304) {
if (((((((int)threadIdx.z) * 3456) + (((int)threadIdx.y) * 864)) + (((int)threadIdx.x) * 62)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 6912) {
if ((((((int)threadIdx.y) * 864) + (((int)threadIdx.x) * 62)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 3456) {
if (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 864) {
kernel_shared[(((((((int)threadIdx.z) * 3456) + (((int)threadIdx.y) * 864)) + (((int)threadIdx.x) * 62)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 27648) + (((int)threadIdx.z) * 13824)) + (((int)threadIdx.y) * 3456)) + ((((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 432) * 1728)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 432)))];
}
}
}
}
}
}
}
__syncthreads();
// consume the staged tile: copy slices to registers, then accumulate
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
#pragma unroll
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 6; ++ax1) {
#pragma unroll
for (int ax2 = 0; ax2 < 3; ++ax2) {
pad_temp_shared_local[(((ax1 * 3) + ax2))] = pad_temp_shared[(((((((rc_inner_outer * 576) + (ax1 * 96)) + (ax2 * 16)) + (((int)threadIdx.y) * 16)) + ((int)threadIdx.x)) + rx_inner_outer))];
}
}
#pragma unroll
for (int ax0 = 0; ax0 < 8; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 6; ++ax11) {
#pragma unroll
for (int ax21 = 0; ax21 < 3; ++ax21) {
kernel_shared_local[((((ax0 * 18) + (ax11 * 3)) + ax21))] = kernel_shared[(((((((((int)threadIdx.z) * 3456) + (ax0 * 432)) + (rc_inner_outer * 54)) + (ax11 * 9)) + (ax21 * 3)) + rx_inner_outer))];
}
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 6; ++rc_inner_inner) {
#pragma unroll
for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 8; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(((rc_inner_inner * 3) + ry_inner_inner))] * kernel_shared_local[((((ff_c * 18) + (rc_inner_inner * 3)) + ry_inner_inner))]));
}
}
}
}
}
}
// write the 8 accumulated output channels back to global memory
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 8; ++ff_inner_inner_inner) {
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
}
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
void pad_input(float * x, float *y){
#pragma omp parallel for
for(unsigned int i=0;i<(H + 2)*(W+2)*C;++i){
y[i] = 0.0f;
}
#pragma omp parallel for
for(unsigned int c=0;c<C;++c){
for(unsigned int h=0;h<H;++h){
for(unsigned int w=0;w<W;++w){
unsigned int h_padded = h + 1;
unsigned int w_padded = w + 1;
y[c*(H+2)*(W+2) + h_padded*(W+2) + w_padded] = x[c*(H)*(W) + h*(W) + w];
}
}
}
}
int main(void){
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(2,7,6);
dim3 block(14,4,2);
float * paddedInputDevice;
chkerr(hipMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(hipMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), hipMemcpyHostToDevice));
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
| 00834208d13ef354783b70a87b18fe182c1469e1.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 192
#define N 96
#define H 28
#define W 28
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[4608];
__shared__ float kernel_shared[6912];
float pad_temp_shared_local[18];
float kernel_shared_local[144];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 8; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 42; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 6)) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 96)) < 48) {
if ((((((int)threadIdx.z) * 144) + (((int)threadIdx.y) * 36)) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 4)) < 288) {
if (((((((int)threadIdx.z) * 2304) + (((int)threadIdx.y) * 576)) + (((int)threadIdx.x) * 42)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 4608) {
if ((((((int)threadIdx.y) * 576) + (((int)threadIdx.x) * 42)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2304) {
if (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 576) {
pad_temp_shared[(((((((int)threadIdx.z) * 2304) + (((int)threadIdx.y) * 576)) + (((int)threadIdx.x) * 42)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 96) >> 4))) && (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 96) >> 4)) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 15)) < 29)) ? data[((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 96) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 96) >> 4) * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 42) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) & 15)) - 29))] : 0.000000e+00f);
}
}
}
}
}
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 62; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 432)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 9)) < 768) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 3)) < 2304) {
if (((((((int)threadIdx.z) * 3456) + (((int)threadIdx.y) * 864)) + (((int)threadIdx.x) * 62)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 6912) {
if ((((((int)threadIdx.y) * 864) + (((int)threadIdx.x) * 62)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 3456) {
if (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 864) {
kernel_shared[(((((((int)threadIdx.z) * 3456) + (((int)threadIdx.y) * 864)) + (((int)threadIdx.x) * 62)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 27648) + (((int)threadIdx.z) * 13824)) + (((int)threadIdx.y) * 3456)) + ((((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 432) * 1728)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 62) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 432)))];
}
}
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
#pragma unroll
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 6; ++ax1) {
#pragma unroll
for (int ax2 = 0; ax2 < 3; ++ax2) {
pad_temp_shared_local[(((ax1 * 3) + ax2))] = pad_temp_shared[(((((((rc_inner_outer * 576) + (ax1 * 96)) + (ax2 * 16)) + (((int)threadIdx.y) * 16)) + ((int)threadIdx.x)) + rx_inner_outer))];
}
}
#pragma unroll
for (int ax0 = 0; ax0 < 8; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 6; ++ax11) {
#pragma unroll
for (int ax21 = 0; ax21 < 3; ++ax21) {
kernel_shared_local[((((ax0 * 18) + (ax11 * 3)) + ax21))] = kernel_shared[(((((((((int)threadIdx.z) * 3456) + (ax0 * 432)) + (rc_inner_outer * 54)) + (ax11 * 9)) + (ax21 * 3)) + rx_inner_outer))];
}
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 6; ++rc_inner_inner) {
#pragma unroll
for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 8; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(((rc_inner_inner * 3) + ry_inner_inner))] * kernel_shared_local[((((ff_c * 18) + (rc_inner_inner * 3)) + ry_inner_inner))]));
}
}
}
}
}
}
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 8; ++ff_inner_inner_inner) {
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
}
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
void pad_input(float * x, float *y){
#pragma omp parallel for
for(unsigned int i=0;i<(H + 2)*(W+2)*C;++i){
y[i] = 0.0f;
}
#pragma omp parallel for
for(unsigned int c=0;c<C;++c){
for(unsigned int h=0;h<H;++h){
for(unsigned int w=0;w<W;++w){
unsigned int h_padded = h + 1;
unsigned int w_padded = w + 1;
y[c*(H+2)*(W+2) + h_padded*(W+2) + w_padded] = x[c*(H)*(W) + h*(W) + w];
}
}
}
}
int main(void){
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(2,7,6);
dim3 block(14,4,2);
float * paddedInputDevice;
chkerr(cudaMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(cudaMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), cudaMemcpyHostToDevice));
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
|
661e057dcdd961c60e766558c46744d54544d9e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ static void LTS(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number)
{
//
float C=100;
float k=1;
float vr=-56;
float vt=-42;
float G_up=1;
float G_down=1;
float a=0.03;
float b=8;
float c=-50;
float d=20;
float v_peak=40;
float sq=0,sp=0;
float I;
float v=neuro[number].v;
float u=neuro[number].u;
I=Ix[number].I;
//Izhikevich model
v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C;
u=u+tau*a*(b*(v-vr)-u);
spike[number]=0;
if(v>v_peak)
{
v=c;
u=u+d;
spike[number]=1;
}
u=fmin(670,u);
neuro[number].v=v;
neuro[number].u=u;
Ix[number].I=0;
}
__global__ static void LTS_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1]+THREAD_NUM[2]*BLOCK_NUM[2]+THREAD_NUM[3]*BLOCK_NUM[3]+THREAD_NUM[4]*BLOCK_NUM[4])*10+(bid * THREAD_NUM[5] + tid)*10;
/*****************/
if((number+0)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+0);}
/****************/
if((number+1)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+1);}
/****************/
if((number+2)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+2);}
/*****************/
if((number+3)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+3);}
/*****************/
if((number+4)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+4);}
/*****************/
if((number+5)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+5);}
/****************/
if((number+6)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+6);}
/*****************/
if((number+7)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+7);}
/*****************/
if((number+8)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+8);}
/*****************/
if((number+9)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+9);}
}
| 661e057dcdd961c60e766558c46744d54544d9e5.cu | #include "cuda_runtime.h"
#include <stdio.h>
__device__ static void LTS(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number)
{
//设置神经元计算参数
float C=100;
float k=1;
float vr=-56;
float vt=-42;
float G_up=1;
float G_down=1;
float a=0.03;
float b=8;
float c=-50;
float d=20;
float v_peak=40;
float sq=0,sp=0;
float I;
float v=neuro[number].v;
float u=neuro[number].u;
I=Ix[number].I;
//Izhikevich model
v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C;
u=u+tau*a*(b*(v-vr)-u);
spike[number]=0;
if(v>v_peak)
{
v=c;
u=u+d;
spike[number]=1;
}
u=fmin(670,u);
neuro[number].v=v;
neuro[number].u=u;
Ix[number].I=0;
}
__global__ static void LTS_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1]+THREAD_NUM[2]*BLOCK_NUM[2]+THREAD_NUM[3]*BLOCK_NUM[3]+THREAD_NUM[4]*BLOCK_NUM[4])*10+(bid * THREAD_NUM[5] + tid)*10;
/********第一个神经元虚拟计算内核*********/
if((number+0)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+0);}
/********第二个神经元虚拟计算内核********/
if((number+1)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+1);}
/********第三个神经元虚拟计算内核********/
if((number+2)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+2);}
/********第四个神经元虚拟计算内核*********/
if((number+3)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+3);}
/********第五个神经元虚拟计算内核*********/
if((number+4)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+4);}
/********第六个神经元虚拟计算内核*********/
if((number+5)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+5);}
/********第七个神经元虚拟计算内核********/
if((number+6)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+6);}
/********第八个神经元虚拟计算内核*********/
if((number+7)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+7);}
/********第九个神经元虚拟计算内核*********/
if((number+8)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+8);}
/********第十个神经元虚拟计算内核*********/
if((number+9)<=boxnum[5])
{LTS(input,neuro,spike,Ix,number+9);}
}
|
4fb1adeddf9d4b85d106ef84570df765c599ba1b.hip | // !!! This is a file automatically generated by hipify!!!
//jacobi7.cu
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include "getopt.h"
#include <cuda_call.h>
#include <jacobi7_cuda_35d.h>
#include <jacobi7.h>
//#ifndef TIME_TILE_SIZE
//#warning TIME_TILE_SIZE is not set, defaulting to 1
//#define TIME_TILE_SIZE 2
//#endif
// Timer function
double rtclock(){
struct timeval tp;
gettimeofday(&tp, NULL);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
int main(int argc, char* *argv){
if(argc != 9) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS> <BX> <BY> \n", argv[0]);
return 1;
}
// program parameters trans
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int timesteps = atoi(argv[6]);
const int bx = atoi(argv[7]);
const int by = atoi(argv[8]);
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
float *h_dA;
float *h_dB;
float *d_dA;
float *d_dB;
float *h_dA1;
float *h_dB1;
// Allocate host buffers
h_dA = (float*) malloc(xyz_bytes);
h_dB = (float*) malloc(xyz_bytes);
h_dA1 = (float*) malloc(xyz_bytes);
h_dB1 = (float*) malloc(xyz_bytes);
// grid data iniatialization
// randomly generaed test data
srand(time(NULL));
for(int i = 0; i < xyz; i++) {
h_dA[i] = i*0.01;//(float)rand() / (float)RAND_MAX;
h_dB[i] = h_dA[i];
h_dA1[i] = h_dA[i];
h_dB1[i] = h_dA[i];
}
float *share_out_d = (float*) malloc((tx+2) * (ty+2) * sizeof(float));
float *share_out_h = (float*) malloc((tx+2) * (ty+2) * sizeof(float));
printf("Start computing...\n");
printf("h_dB[%d]:%f\n", 2+nx*(3+ny*4), h_dB[2+nx*(3+ny*4)]);
printf("h_dA[%d]:%f\n", 3+nx*(4+ny*5), h_dA[3+nx*(4+ny*5)]);
/*float *B = 0;
const int ldb = 0;
const int ldc = 0;*/
// Always use device 0
hipSetDevice(0);
/* set the ratio of cache/shared memory
hipFuncCachePreferNone: Default function cache configuration, no preference
hipFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
hipFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
*/
CHECK_CALL(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
// Allocate device buffers
CHECK_CALL(hipMalloc((void**)&d_dA, xyz_bytes));
CHECK_CALL(hipMalloc((void**)&d_dB, xyz_bytes));
CHECK_CALL(hipMalloc((void**)&share_out_d, (tx+2) * (ty+2) * sizeof(float)));
// Copy to device
CHECK_CALL(hipMemcpy(d_dA, h_dA, xyz_bytes, hipMemcpyHostToDevice));
//CHECK_CALL(hipMemcpy(d_dB, h_dB, xyz_bytes, hipMemcpyHostToDevice));
//CHECK_CALL(hipMemcpy(d_dB, d_dA, xyz_bytes, hipMemcpyDeviceToDevice));
// Setup the kernel
dim3 grid(nx/(tx-2), ny/(ty-2), 1);
dim3 block(tx, ty);
if (nx % (tx- 2)) ++grid.x;
if (ny % (ty - 2)) ++grid.y;
float *tmp;
const float fac = 6.0/(h_dA[0] * h_dA[0]);
const int sharedMemSize = 9 * (block.x+2) * (block.y+2) * sizeof(float);
printf("sharedmemeory Size:%dk\n",sharedMemSize/1024);
//double startTime = rtclock();
// Run the GPU kernel
//for(int t = 0; t < timesteps; t += 2) {
//jacobi7_35d<<<grid, block, sharedMemSize>>>(d_dA, d_dB, nx, ny, nz, fac);
hipLaunchKernelGGL(( jacobi7_35d_halo_test), dim3(grid), dim3(block), sharedMemSize, 0, d_dA, share_out_d, nx, ny, nz, fac, bx, by);
hipMemcpy(share_out_h, share_out_d, (tx+2)*(ty+2)*sizeof(float), hipMemcpyDeviceToHost);
for(int i = 0; i<tx+2; i++){
for(int j=0; j< ty+2; j++){
printf("%f ",share_out_h[i+j*(tx+2)]);
}
printf("\n");
}
// printf("t:%d\n",t);
// swap input and output
//tmp = d_dA;
//d_dA = d_dB;
//d_dB = tmp;
//}
//SYNC_DEVICE();
//ASSERT_STATE("jacobi7_35d");
//double endTime = rtclock();
// double elapsedTimeG = endTime - startTime;
/*
printf("Elapsed Time:%lf\n", elapsedTimeG);
double flops = xyz * 7.0 * timesteps;
double gflops = flops / elapsedTimeG / 1e9;
printf("(GPU) %lf GFlop/s\n", gflops);
// Copy the result to main memory
float *resultG;
if((timesteps/2) % 2){
CHECK_CALL(hipMemcpy(h_dA, d_dA, xyz_bytes, hipMemcpyDeviceToHost));
}
else{
CHECK_CALL(hipMemcpy(h_dA, d_dB, xyz_bytes, hipMemcpyDeviceToHost));
}
resultG = h_dA;
// Run the CPU version
startTime = rtclock();
for(int t = 0; t < timesteps; t += 1) {
jacobi7(nx, ny, nz, h_dA1, B, ldb, h_dB1, ldc);
tmp = h_dA1;
h_dA1 = h_dB1;
h_dB1 = tmp;
}
float *resultC;
if (timesteps % 2)
resultC = h_dB1;
else
resultC = h_dA1;
endTime = rtclock();
double elapsedTimeC = endTime - startTime;
printf("Elapsed Time:%lf\n", elapsedTimeC);
flops = xyz * 7.0 * timesteps;
gflops = flops / elapsedTimeC / 1e9;
printf("(CPU) %lf GFlop/s\n", gflops);
// compare the results btw CPU and GPU version
double errorNorm, refNorm, diff;
errorNorm = 0.0;
refNorm = 0.0;
for (int i = 0; i < xyz; ++i){
diff = resultC[i] - resultG[i];
errorNorm += diff * diff;
refNorm += resultC[i] * resultG[i];
}
errorNorm = sqrt(errorNorm);
refNorm = sqrt(refNorm);
printf("Error Norm:%lf\n", errorNorm);
printf("Ref Norm:%lf\n", refNorm);
if(abs(refNorm) < 1e-7) {
printf("Correctness, FAILED\n");
}
else if((errorNorm / refNorm) > 1e-2) {
printf("Correctness, FAILED\n");
}
else {
printf("Correctness, PASSED\n");
}
printf("resultC[%d]:%f\n", 2+nx*(3+ny*4), resultC[2+nx*(3+ny*4)]);
printf("resultG[%d]:%f\n", 2+nx*(3+ny*4), resultG[2+nx*(3+ny*4)]);
printf("-----------------------------------\n");
printf("resultC[%d]:%f\n", 3+nx*(4+ny*5), resultC[3+nx*(4+ny*5)]);
printf("resultG[%d]:%f\n", 3+nx*(4+ny*5), resultG[3+nx*(4+ny*5)]);
*/
// Free buffers
free(h_dA);
free(h_dB);
free(h_dA1);
free(h_dB1);
CHECK_CALL(hipFree(d_dA));
CHECK_CALL(hipFree(d_dB));
} | 4fb1adeddf9d4b85d106ef84570df765c599ba1b.cu | //jacobi7.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include "getopt.h"
#include <cuda_call.h>
#include <jacobi7_cuda_35d.h>
#include <jacobi7.h>
//#ifndef TIME_TILE_SIZE
//#warning TIME_TILE_SIZE is not set, defaulting to 1
//#define TIME_TILE_SIZE 2
//#endif
// Timer function
double rtclock(){
struct timeval tp;
gettimeofday(&tp, NULL);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
int main(int argc, char* *argv){
if(argc != 9) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS> <BX> <BY> \n", argv[0]);
return 1;
}
// program parameters trans
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int timesteps = atoi(argv[6]);
const int bx = atoi(argv[7]);
const int by = atoi(argv[8]);
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
float *h_dA;
float *h_dB;
float *d_dA;
float *d_dB;
float *h_dA1;
float *h_dB1;
// Allocate host buffers
h_dA = (float*) malloc(xyz_bytes);
h_dB = (float*) malloc(xyz_bytes);
h_dA1 = (float*) malloc(xyz_bytes);
h_dB1 = (float*) malloc(xyz_bytes);
// grid data iniatialization
// randomly generaed test data
srand(time(NULL));
for(int i = 0; i < xyz; i++) {
h_dA[i] = i*0.01;//(float)rand() / (float)RAND_MAX;
h_dB[i] = h_dA[i];
h_dA1[i] = h_dA[i];
h_dB1[i] = h_dA[i];
}
float *share_out_d = (float*) malloc((tx+2) * (ty+2) * sizeof(float));
float *share_out_h = (float*) malloc((tx+2) * (ty+2) * sizeof(float));
printf("Start computing...\n");
printf("h_dB[%d]:%f\n", 2+nx*(3+ny*4), h_dB[2+nx*(3+ny*4)]);
printf("h_dA[%d]:%f\n", 3+nx*(4+ny*5), h_dA[3+nx*(4+ny*5)]);
/*float *B = 0;
const int ldb = 0;
const int ldc = 0;*/
// Always use device 0
cudaSetDevice(0);
/* set the ratio of cache/shared memory
cudaFuncCachePreferNone: Default function cache configuration, no preference
cudaFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache
cudaFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory
*/
CHECK_CALL(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
// Allocate device buffers
CHECK_CALL(cudaMalloc((void**)&d_dA, xyz_bytes));
CHECK_CALL(cudaMalloc((void**)&d_dB, xyz_bytes));
CHECK_CALL(cudaMalloc((void**)&share_out_d, (tx+2) * (ty+2) * sizeof(float)));
// Copy to device
CHECK_CALL(cudaMemcpy(d_dA, h_dA, xyz_bytes, cudaMemcpyHostToDevice));
//CHECK_CALL(cudaMemcpy(d_dB, h_dB, xyz_bytes, cudaMemcpyHostToDevice));
//CHECK_CALL(cudaMemcpy(d_dB, d_dA, xyz_bytes, cudaMemcpyDeviceToDevice));
// Setup the kernel
dim3 grid(nx/(tx-2), ny/(ty-2), 1);
dim3 block(tx, ty);
if (nx % (tx- 2)) ++grid.x;
if (ny % (ty - 2)) ++grid.y;
float *tmp;
const float fac = 6.0/(h_dA[0] * h_dA[0]);
const int sharedMemSize = 9 * (block.x+2) * (block.y+2) * sizeof(float);
printf("sharedmemeory Size:%dk\n",sharedMemSize/1024);
//double startTime = rtclock();
// Run the GPU kernel
//for(int t = 0; t < timesteps; t += 2) {
//jacobi7_35d<<<grid, block, sharedMemSize>>>(d_dA, d_dB, nx, ny, nz, fac);
jacobi7_35d_halo_test<<<grid, block, sharedMemSize>>>(d_dA, share_out_d, nx, ny, nz, fac, bx, by);
cudaMemcpy(share_out_h, share_out_d, (tx+2)*(ty+2)*sizeof(float), cudaMemcpyDeviceToHost);
for(int i = 0; i<tx+2; i++){
for(int j=0; j< ty+2; j++){
printf("%f ",share_out_h[i+j*(tx+2)]);
}
printf("\n");
}
// printf("t:%d\n",t);
// swap input and output
//tmp = d_dA;
//d_dA = d_dB;
//d_dB = tmp;
//}
//SYNC_DEVICE();
//ASSERT_STATE("jacobi7_35d");
//double endTime = rtclock();
// double elapsedTimeG = endTime - startTime;
/*
printf("Elapsed Time:%lf\n", elapsedTimeG);
double flops = xyz * 7.0 * timesteps;
double gflops = flops / elapsedTimeG / 1e9;
printf("(GPU) %lf GFlop/s\n", gflops);
// Copy the result to main memory
float *resultG;
if((timesteps/2) % 2){
CHECK_CALL(cudaMemcpy(h_dA, d_dA, xyz_bytes, cudaMemcpyDeviceToHost));
}
else{
CHECK_CALL(cudaMemcpy(h_dA, d_dB, xyz_bytes, cudaMemcpyDeviceToHost));
}
resultG = h_dA;
// Run the CPU version
startTime = rtclock();
for(int t = 0; t < timesteps; t += 1) {
jacobi7(nx, ny, nz, h_dA1, B, ldb, h_dB1, ldc);
tmp = h_dA1;
h_dA1 = h_dB1;
h_dB1 = tmp;
}
float *resultC;
if (timesteps % 2)
resultC = h_dB1;
else
resultC = h_dA1;
endTime = rtclock();
double elapsedTimeC = endTime - startTime;
printf("Elapsed Time:%lf\n", elapsedTimeC);
flops = xyz * 7.0 * timesteps;
gflops = flops / elapsedTimeC / 1e9;
printf("(CPU) %lf GFlop/s\n", gflops);
// compare the results btw CPU and GPU version
double errorNorm, refNorm, diff;
errorNorm = 0.0;
refNorm = 0.0;
for (int i = 0; i < xyz; ++i){
diff = resultC[i] - resultG[i];
errorNorm += diff * diff;
refNorm += resultC[i] * resultG[i];
}
errorNorm = sqrt(errorNorm);
refNorm = sqrt(refNorm);
printf("Error Norm:%lf\n", errorNorm);
printf("Ref Norm:%lf\n", refNorm);
if(abs(refNorm) < 1e-7) {
printf("Correctness, FAILED\n");
}
else if((errorNorm / refNorm) > 1e-2) {
printf("Correctness, FAILED\n");
}
else {
printf("Correctness, PASSED\n");
}
printf("resultC[%d]:%f\n", 2+nx*(3+ny*4), resultC[2+nx*(3+ny*4)]);
printf("resultG[%d]:%f\n", 2+nx*(3+ny*4), resultG[2+nx*(3+ny*4)]);
printf("-----------------------------------\n");
printf("resultC[%d]:%f\n", 3+nx*(4+ny*5), resultC[3+nx*(4+ny*5)]);
printf("resultG[%d]:%f\n", 3+nx*(4+ny*5), resultG[3+nx*(4+ny*5)]);
*/
// Free buffers
free(h_dA);
free(h_dB);
free(h_dA1);
free(h_dB1);
CHECK_CALL(cudaFree(d_dA));
CHECK_CALL(cudaFree(d_dB));
} |
f77a6c9bf0703cdc68f088cc0aa915deed8534fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Your job is to implemment a bitonic sort. A description of the bitonic sort
// can be see at:
// http://en.wikipedia.org/wiki/Bitonic_sort
__device__
int powerOfTwo(const short N) {
switch( N ) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return 8;
case 4:
return 16;
case 5:
return 32;
case 6:
return 64;
default:
return 1;
}
}
__device__
void compareFloatAndSwap(float * data, const int x, const int y) {
float temp = 0;
if ( data[y] < data[x] ) {
temp = data[x];
data[x] = data[y];
data[y] = temp;
}
}
__global__ void batcherBitonicMergesort64(float * d_out, const float * d_in)
{
// you are guaranteed this is called with <<<1, 64, 64*4>>>
extern __shared__ float data[];
int tid = threadIdx.x;
data[tid] = d_in[tid];
__syncthreads();
int _pow1, _pow2;
for (int stage = 0; stage <= 5; stage++)
{
_pow1 = powerOfTwo(stage + 1);
for (int substage = stage; substage >= 0; substage--)
{
_pow2 = powerOfTwo(substage);
if ( (tid/_pow1) % 2 ) {
if( (tid/_pow2) % 2 ) compareFloatAndSwap(data, tid, tid - _pow2);
} else {
if( (tid/_pow2) % 2 == 0 ) compareFloatAndSwap(data, tid, tid + _pow2);
}
__syncthreads();
}
}
d_out[tid] = data[tid];
}
int compareFloat (const void * a, const void * b)
{
if ( *(float*)a < *(float*)b ) return -1;
if ( *(float*)a == *(float*)b ) return 0;
if ( *(float*)a > *(float*)b ) return 1;
return 0; // should never reach this
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float h_sorted[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX;
h_sorted[i] = h_in[i];
}
qsort(h_sorted, ARRAY_SIZE, sizeof(float), compareFloat);
// declare GPU memory pointers
float * d_in, * d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( batcherBitonicMergesort64), dim3(1), dim3(ARRAY_SIZE), ARRAY_SIZE * sizeof(float), 0, d_out, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
// copy back the sum from GPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// compare your result against the reference solution
compare(h_out, h_sorted, ARRAY_SIZE);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
} | f77a6c9bf0703cdc68f088cc0aa915deed8534fe.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Your job is to implemment a bitonic sort. A description of the bitonic sort
// can be see at:
// http://en.wikipedia.org/wiki/Bitonic_sort
__device__
int powerOfTwo(const short N) {
switch( N ) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return 8;
case 4:
return 16;
case 5:
return 32;
case 6:
return 64;
default:
return 1;
}
}
__device__
void compareFloatAndSwap(float * data, const int x, const int y) {
float temp = 0;
if ( data[y] < data[x] ) {
temp = data[x];
data[x] = data[y];
data[y] = temp;
}
}
__global__ void batcherBitonicMergesort64(float * d_out, const float * d_in)
{
// you are guaranteed this is called with <<<1, 64, 64*4>>>
extern __shared__ float data[];
int tid = threadIdx.x;
data[tid] = d_in[tid];
__syncthreads();
int _pow1, _pow2;
for (int stage = 0; stage <= 5; stage++)
{
_pow1 = powerOfTwo(stage + 1);
for (int substage = stage; substage >= 0; substage--)
{
_pow2 = powerOfTwo(substage);
if ( (tid/_pow1) % 2 ) {
if( (tid/_pow2) % 2 ) compareFloatAndSwap(data, tid, tid - _pow2);
} else {
if( (tid/_pow2) % 2 == 0 ) compareFloatAndSwap(data, tid, tid + _pow2);
}
__syncthreads();
}
}
d_out[tid] = data[tid];
}
int compareFloat (const void * a, const void * b)
{
if ( *(float*)a < *(float*)b ) return -1;
if ( *(float*)a == *(float*)b ) return 0;
if ( *(float*)a > *(float*)b ) return 1;
return 0; // should never reach this
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float h_sorted[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX;
h_sorted[i] = h_in[i];
}
qsort(h_sorted, ARRAY_SIZE, sizeof(float), compareFloat);
// declare GPU memory pointers
float * d_in, * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
GpuTimer timer;
timer.Start();
batcherBitonicMergesort64<<<1, ARRAY_SIZE, ARRAY_SIZE * sizeof(float)>>>(d_out, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
// copy back the sum from GPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// compare your result against the reference solution
compare(h_out, h_sorted, ARRAY_SIZE);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
} |
6b3da2a7aa7a8c324daa0f20b470b5c106d2c246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/local_response_norm_impl.cuh"
#include "include/hip/hip_fp16.h"
template <typename T>
__global__ void ComputeScaleNHWC(const T *input, const int depth_radius, const float bias, const float alpha,
const size_t channels, const size_t num_elements, float *scale) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < num_elements; pos += blockDim.x * gridDim.x) {
const int posc = static_cast<int>(pos % channels);
float sqr_sum = 0;
for (int i = -depth_radius; i < depth_radius + 1; i++) {
if (posc + i >= 0 && posc + i < static_cast<int>(channels)) {
float a = static_cast<float>(input[pos + i]);
sqr_sum += a * a;
}
}
scale[pos] = bias + alpha * sqr_sum;
}
return;
}
template <typename T>
__global__ void LocalResponseNormNHWC(const T *input, const float *scale, const float beta, const size_t num_elements,
T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < num_elements; pos += blockDim.x * gridDim.x) {
float z = expf(logf(scale[pos]) * -beta);
output[pos] = input[pos] * static_cast<T>(z);
}
return;
}
template <typename T>
__global__ void LocalResponseNormGradNHWC(const T *dy, const T *x, const T *y, const float *scale,
const int depth_radius, const float alpha, const float beta, const float neg2_alpha_beta, const size_t channels,
const size_t num_elements, T *dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < num_elements; pos += blockDim.x * gridDim.x) {
const int posc = static_cast<int>(pos % channels);
float ratio_sum = 0;
for (int i = -depth_radius; i <= depth_radius; i++) {
if (posc + i >= 0 && posc + i < static_cast<int>(channels)) {
ratio_sum += static_cast<float>(dy[pos + i] * y[pos + i]) / scale[pos + i];
}
}
float z = expf(logf(scale[pos]) * -beta);
float ratio_2ab = ratio_sum * neg2_alpha_beta;
dx[pos] = dy[pos] * static_cast<T>(z) + x[pos] * static_cast<T>(ratio_2ab);
}
return;
}
template <typename T>
void CalLocalResponseNormNHWC(const T *input, const int depth_radius, const float bias, const float alpha,
const float beta, const size_t channels, const size_t num_elements, float *scale, T *output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ComputeScaleNHWC), dim3(GET_BLOCKS(num_elements)), dim3(GET_THREADS), 0, cuda_stream, input, depth_radius, bias, alpha,
channels, num_elements, scale);
hipLaunchKernelGGL(( LocalResponseNormNHWC), dim3(GET_BLOCKS(num_elements)), dim3(GET_THREADS), 0, cuda_stream, input, scale, beta, num_elements,
output);
return;
}
template <typename T>
void CalLocalResponseNormGradNHWC(const T *dy, const T *x, const T *y, const int depth_radius, const float bias,
const float alpha, const float beta, const size_t channels, const size_t num_elements, float *scale, T *dx,
hipStream_t cuda_stream) {
float neg2_alpha_beta = -2.0f * alpha * beta;
hipLaunchKernelGGL(( ComputeScaleNHWC), dim3(GET_BLOCKS(num_elements)), dim3(GET_THREADS), 0, cuda_stream, x, depth_radius, bias, alpha, channels,
num_elements, scale);
hipLaunchKernelGGL(( LocalResponseNormGradNHWC), dim3(GET_BLOCKS(num_elements)), dim3(GET_THREADS), 0, cuda_stream, dy, x, y, scale, depth_radius,
alpha, beta, neg2_alpha_beta, channels, num_elements, dx);
return;
}
template CUDA_LIB_EXPORT void CalLocalResponseNormNHWC<float>(const float *input, const int depth_radius,
const float bias, const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, float *output, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalLocalResponseNormNHWC<half>(const half *input, const int depth_radius,
const float bias, const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, half *output, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalLocalResponseNormGradNHWC<float>(const float *dy, const float *x, const float *y,
const int depth_radius, const float bias,
const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, float *dx, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalLocalResponseNormGradNHWC<half>(const half *dy, const half *x, const half *y,
const int depth_radius, const float bias,
const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, half *dx, hipStream_t cuda_stream);
| 6b3da2a7aa7a8c324daa0f20b470b5c106d2c246.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/local_response_norm_impl.cuh"
#include "include/cuda_fp16.h"
template <typename T>
__global__ void ComputeScaleNHWC(const T *input, const int depth_radius, const float bias, const float alpha,
const size_t channels, const size_t num_elements, float *scale) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < num_elements; pos += blockDim.x * gridDim.x) {
const int posc = static_cast<int>(pos % channels);
float sqr_sum = 0;
for (int i = -depth_radius; i < depth_radius + 1; i++) {
if (posc + i >= 0 && posc + i < static_cast<int>(channels)) {
float a = static_cast<float>(input[pos + i]);
sqr_sum += a * a;
}
}
scale[pos] = bias + alpha * sqr_sum;
}
return;
}
template <typename T>
__global__ void LocalResponseNormNHWC(const T *input, const float *scale, const float beta, const size_t num_elements,
T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < num_elements; pos += blockDim.x * gridDim.x) {
float z = expf(logf(scale[pos]) * -beta);
output[pos] = input[pos] * static_cast<T>(z);
}
return;
}
template <typename T>
__global__ void LocalResponseNormGradNHWC(const T *dy, const T *x, const T *y, const float *scale,
const int depth_radius, const float alpha, const float beta, const float neg2_alpha_beta, const size_t channels,
const size_t num_elements, T *dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < num_elements; pos += blockDim.x * gridDim.x) {
const int posc = static_cast<int>(pos % channels);
float ratio_sum = 0;
for (int i = -depth_radius; i <= depth_radius; i++) {
if (posc + i >= 0 && posc + i < static_cast<int>(channels)) {
ratio_sum += static_cast<float>(dy[pos + i] * y[pos + i]) / scale[pos + i];
}
}
float z = expf(logf(scale[pos]) * -beta);
float ratio_2ab = ratio_sum * neg2_alpha_beta;
dx[pos] = dy[pos] * static_cast<T>(z) + x[pos] * static_cast<T>(ratio_2ab);
}
return;
}
template <typename T>
void CalLocalResponseNormNHWC(const T *input, const int depth_radius, const float bias, const float alpha,
const float beta, const size_t channels, const size_t num_elements, float *scale, T *output,
cudaStream_t cuda_stream) {
ComputeScaleNHWC<<<GET_BLOCKS(num_elements), GET_THREADS, 0, cuda_stream>>>(input, depth_radius, bias, alpha,
channels, num_elements, scale);
LocalResponseNormNHWC<<<GET_BLOCKS(num_elements), GET_THREADS, 0, cuda_stream>>>(input, scale, beta, num_elements,
output);
return;
}
template <typename T>
void CalLocalResponseNormGradNHWC(const T *dy, const T *x, const T *y, const int depth_radius, const float bias,
const float alpha, const float beta, const size_t channels, const size_t num_elements, float *scale, T *dx,
cudaStream_t cuda_stream) {
float neg2_alpha_beta = -2.0f * alpha * beta;
ComputeScaleNHWC<<<GET_BLOCKS(num_elements), GET_THREADS, 0, cuda_stream>>>(x, depth_radius, bias, alpha, channels,
num_elements, scale);
LocalResponseNormGradNHWC<<<GET_BLOCKS(num_elements), GET_THREADS, 0, cuda_stream>>>(dy, x, y, scale, depth_radius,
alpha, beta, neg2_alpha_beta, channels, num_elements, dx);
return;
}
template CUDA_LIB_EXPORT void CalLocalResponseNormNHWC<float>(const float *input, const int depth_radius,
const float bias, const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, float *output, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalLocalResponseNormNHWC<half>(const half *input, const int depth_radius,
const float bias, const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, half *output, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalLocalResponseNormGradNHWC<float>(const float *dy, const float *x, const float *y,
const int depth_radius, const float bias,
const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, float *dx, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalLocalResponseNormGradNHWC<half>(const half *dy, const half *x, const half *y,
const int depth_radius, const float bias,
const float alpha, const float beta,
const size_t channels, const size_t num_elements,
float *scale, half *dx, cudaStream_t cuda_stream);
|
acaa83bf0778b0963959f9de1e880eac6de4a733.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_common.h"
__global__ void scale_matrix_columns_gpu_kernel
(
int nrow,
hipDoubleComplex* mtrx,
double* a
)
{
int icol = blockIdx.y;
int irow = blockIdx.x * blockDim.x + threadIdx.x;
if (irow < nrow)
{
mtrx[array2D_offset(irow, icol, nrow)] =
cuCmul(mtrx[array2D_offset(irow, icol, nrow)], make_cuDoubleComplex(a[icol], 0));
}
}
// scale each column of the matrix by a column-dependent constant
extern "C" void scale_matrix_columns_gpu(int nrow,
int ncol,
hipDoubleComplex* mtrx,
double* a)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow, grid_t.x), ncol);
hipLaunchKernelGGL(( scale_matrix_columns_gpu_kernel) , dim3(grid_b), dim3(grid_t), 0, 0,
nrow,
mtrx,
a
);
}
__global__ void scale_matrix_rows_gpu_kernel
(
int nrow__,
hipDoubleComplex* mtrx__,
double const* v__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
hipDoubleComplex z = mtrx__[array2D_offset(irow, icol, nrow__)];
mtrx__[array2D_offset(irow, icol, nrow__)] = make_cuDoubleComplex(z.x * v__[irow], z.y * v__[irow]);
}
}
// scale each row of the matrix by a row-dependent constant
extern "C" void scale_matrix_rows_gpu(int nrow__,
int ncol__,
hipDoubleComplex* mtrx__,
double const* v__)
{
dim3 grid_t(256);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
hipLaunchKernelGGL(( scale_matrix_rows_gpu_kernel) , dim3(grid_b), dim3(grid_t), 0, 0,
nrow__,
mtrx__,
v__
);
}
__global__ void scale_matrix_elements_gpu_kernel
(
hipDoubleComplex* mtrx__,
int ld__,
int nrow__,
double beta__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
hipDoubleComplex z = mtrx__[array2D_offset(irow, icol, ld__)];
mtrx__[array2D_offset(irow, icol, ld__)] = make_cuDoubleComplex(z.x * beta__, z.y * beta__);
}
}
extern "C" void scale_matrix_elements_gpu(hipDoubleComplex* ptr__,
int ld__,
int nrow__,
int ncol__,
double beta__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
hipLaunchKernelGGL(( scale_matrix_elements_gpu_kernel) , dim3(grid_b), dim3(grid_t), 0, 0,
ptr__,
ld__,
nrow__,
beta__
);
}
| acaa83bf0778b0963959f9de1e880eac6de4a733.cu | #include "cuda_common.h"
__global__ void scale_matrix_columns_gpu_kernel
(
int nrow,
cuDoubleComplex* mtrx,
double* a
)
{
int icol = blockIdx.y;
int irow = blockIdx.x * blockDim.x + threadIdx.x;
if (irow < nrow)
{
mtrx[array2D_offset(irow, icol, nrow)] =
cuCmul(mtrx[array2D_offset(irow, icol, nrow)], make_cuDoubleComplex(a[icol], 0));
}
}
// scale each column of the matrix by a column-dependent constant
extern "C" void scale_matrix_columns_gpu(int nrow,
int ncol,
cuDoubleComplex* mtrx,
double* a)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow, grid_t.x), ncol);
scale_matrix_columns_gpu_kernel <<<grid_b, grid_t>>>
(
nrow,
mtrx,
a
);
}
__global__ void scale_matrix_rows_gpu_kernel
(
int nrow__,
cuDoubleComplex* mtrx__,
double const* v__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
cuDoubleComplex z = mtrx__[array2D_offset(irow, icol, nrow__)];
mtrx__[array2D_offset(irow, icol, nrow__)] = make_cuDoubleComplex(z.x * v__[irow], z.y * v__[irow]);
}
}
// scale each row of the matrix by a row-dependent constant
extern "C" void scale_matrix_rows_gpu(int nrow__,
int ncol__,
cuDoubleComplex* mtrx__,
double const* v__)
{
dim3 grid_t(256);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
scale_matrix_rows_gpu_kernel <<<grid_b, grid_t>>>
(
nrow__,
mtrx__,
v__
);
}
__global__ void scale_matrix_elements_gpu_kernel
(
cuDoubleComplex* mtrx__,
int ld__,
int nrow__,
double beta__
)
{
int icol = blockIdx.y;
int irow = blockDim.x * blockIdx.x + threadIdx.x;
if (irow < nrow__) {
cuDoubleComplex z = mtrx__[array2D_offset(irow, icol, ld__)];
mtrx__[array2D_offset(irow, icol, ld__)] = make_cuDoubleComplex(z.x * beta__, z.y * beta__);
}
}
extern "C" void scale_matrix_elements_gpu(cuDoubleComplex* ptr__,
int ld__,
int nrow__,
int ncol__,
double beta__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(nrow__, grid_t.x), ncol__);
scale_matrix_elements_gpu_kernel <<<grid_b, grid_t>>>
(
ptr__,
ld__,
nrow__,
beta__
);
}
|
0f4dc8aa9bff429d49a7084b3214ba404bd1d779.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "device.hpp"
//#include "../internal.h"
using namespace pcl::device;
namespace pcl
{
namespace device
{
template<typename T>
__global__ void
initializeVolume (PtrStep<T> volume)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
T *pos = volume.ptr(y) + x;
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for(int z = 0; z < VOLUME_Z; ++z, pos+=z_step)
pack_tsdf (0.f, 0, *pos);
}
}
}
}
void
pcl::device::initVolume (PtrStep<short2> volume)
{
dim3 block (16, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
hipLaunchKernelGGL(( initializeVolume), dim3(grid), dim3(block), 0, 0, volume);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
struct Tsdf
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 8,
MAX_WEIGHT = 1 << 7
};
mutable PtrStep<short2> volume;
float3 cell_size;
Intr intr;
Mat33 Rcurr_inv;
float3 tcurr;
PtrStepSz<ushort> depth_raw; //depth in mm
float tranc_dist_mm;
__device__ __forceinline__ float3
getVoxelGCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(*pos);
for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step)
{
float3 v_g = getVoxelGCoo (x, y, z); //3 // p
//transform to curr cam coo space
float3 v = Rcurr_inv * (v_g - tcurr); //4
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x];
if (Dp != 0)
{
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm
sdf *= (-1);
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
}
}
};
__global__ void
integrateTsdfKernel (const Tsdf tsdf) {
tsdf ();
}
__global__ void
tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr,
const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z;
float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z;
float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z;
//#pragma unroll
for (int z = 0; z < VOLUME_Z; ++z)
{
float3 vr;
vr.x = v_g_x;
vr.y = v_g_y;
vr.z = (v_g_z + z * cell_size.z);
float3 v;
v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z;
v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z;
v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z;
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm
if (Dp != 0)
{
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1.f);
float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
pos += elem_step;
} /* for(int z = 0; z < VOLUME_Z; ++z) */
} /* __global__ */
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size,
const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist,
PtrStep<short2> volume)
{
Tsdf tsdf;
tsdf.volume = volume;
tsdf.cell_size.x = volume_size.x / VOLUME_X;
tsdf.cell_size.y = volume_size.y / VOLUME_Y;
tsdf.cell_size.z = volume_size.z / VOLUME_Z;
tsdf.intr = intr;
tsdf.Rcurr_inv = Rcurr_inv;
tsdf.tcurr = tcurr;
tsdf.depth_raw = depth_raw;
tsdf.tranc_dist_mm = tranc_dist*1000; //mm
dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
#if 0
//tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size);
hipLaunchKernelGGL(( integrateTsdfKernel), dim3(grid), dim3(block), 0, 0, tsdf);
#endif
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
__global__ void
scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
int Dp = depth.ptr (y)[x];
float xl = (x - intr.cx) / intr.fx;
float yl = (y - intr.cy) / intr.fy;
float lambda = sqrtf (xl * xl + yl * yl + 1);
scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters
}
__global__ void
tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
__global__ void
tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
bool integrate = true;
if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2))
{
const float qnan = numeric_limits<float>::quiet_NaN();
float3 normal = make_float3(qnan, qnan, qnan);
float Fn, Fp;
int Wn = 0, Wp = 0;
unpack_tsdf (*(pos + elem_step), Fn, Wn);
unpack_tsdf (*(pos - elem_step), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.z = (Fn - Fp)/cell_size.z;
unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn);
unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.y = (Fn - Fp)/cell_size.y;
unpack_tsdf (*(pos + 1), Fn, Wn);
unpack_tsdf (*(pos - 1), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.x = (Fn - Fp)/cell_size.x;
if (normal.x != qnan && normal.y != qnan && normal.z != qnan)
{
float norm2 = dot(normal, normal);
if (norm2 >= 1e-10)
{
normal *= rsqrt(norm2);
float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z;
float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z);
if (cosine < 0.5)
integrate = false;
}
}
}
if (integrate)
{
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr,
const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr,
float tranc_dist,
PtrStep<short2> volume, DeviceArray2D<float>& depthScaled)
{
depthScaled.create (depth.rows, depth.cols);
dim3 block_scale (32, 8);
dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y));
//scales depth along ray and converts mm -> meters.
hipLaunchKernelGGL(( scaleDepth), dim3(grid_scale), dim3(block_scale), 0, 0, depth, depthScaled, intr);
cudaSafeCall ( hipGetLastError () );
float3 cell_size;
cell_size.x = volume_size.x / VOLUME_X;
cell_size.y = volume_size.y / VOLUME_Y;
cell_size.z = volume_size.z / VOLUME_Z;
//dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 block (16, 16);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
hipLaunchKernelGGL(( tsdf23), dim3(grid), dim3(block), 0, 0, depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size);
//tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| 0f4dc8aa9bff429d49a7084b3214ba404bd1d779.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "device.hpp"
//#include "../internal.h"
using namespace pcl::device;
namespace pcl
{
namespace device
{
template<typename T>
__global__ void
initializeVolume (PtrStep<T> volume)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
T *pos = volume.ptr(y) + x;
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for(int z = 0; z < VOLUME_Z; ++z, pos+=z_step)
pack_tsdf (0.f, 0, *pos);
}
}
}
}
void
pcl::device::initVolume (PtrStep<short2> volume)
{
dim3 block (16, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
initializeVolume<<<grid, block>>>(volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
struct Tsdf
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 8,
MAX_WEIGHT = 1 << 7
};
mutable PtrStep<short2> volume;
float3 cell_size;
Intr intr;
Mat33 Rcurr_inv;
float3 tcurr;
PtrStepSz<ushort> depth_raw; //depth in mm
float tranc_dist_mm;
__device__ __forceinline__ float3
getVoxelGCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(*pos);
for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step)
{
float3 v_g = getVoxelGCoo (x, y, z); //3 // p
//transform to curr cam coo space
float3 v = Rcurr_inv * (v_g - tcurr); //4
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x];
if (Dp != 0)
{
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1);
float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm
sdf *= (-1);
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
}
}
};
__global__ void
integrateTsdfKernel (const Tsdf tsdf) {
tsdf ();
}
__global__ void
tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr,
const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
short2 *pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z;
float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z;
float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z;
//#pragma unroll
for (int z = 0; z < VOLUME_Z; ++z)
{
float3 vr;
vr.x = v_g_x;
vr.y = v_g_y;
vr.z = (v_g_z + z * cell_size.z);
float3 v;
v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z;
v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z;
v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z;
int2 coo; //project to current cam
coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx);
coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy);
if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6
{
int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm
if (Dp != 0)
{
float xl = (coo.x - intr.cx) / intr.fx;
float yl = (coo.y - intr.cy) / intr.fy;
float lambda_inv = rsqrtf (xl * xl + yl * yl + 1.f);
float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm
if (sdf >= -tranc_dist_mm)
{
float tsdf = fmin (1.f, sdf / tranc_dist_mm);
int weight_prev;
float tsdf_prev;
//read and unpack
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
pos += elem_step;
} /* for(int z = 0; z < VOLUME_Z; ++z) */
} /* __global__ */
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size,
const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist,
PtrStep<short2> volume)
{
Tsdf tsdf;
tsdf.volume = volume;
tsdf.cell_size.x = volume_size.x / VOLUME_X;
tsdf.cell_size.y = volume_size.y / VOLUME_Y;
tsdf.cell_size.z = volume_size.z / VOLUME_Z;
tsdf.intr = intr;
tsdf.Rcurr_inv = Rcurr_inv;
tsdf.tcurr = tcurr;
tsdf.depth_raw = depth_raw;
tsdf.tranc_dist_mm = tranc_dist*1000; //mm
dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
#if 0
//tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size);
integrateTsdfKernel<<<grid, block>>>(tsdf);
#endif
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
namespace pcl
{
namespace device
{
__global__ void
scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
int Dp = depth.ptr (y)[x];
float xl = (x - intr.cx) / intr.fx;
float yl = (y - intr.cy) / intr.fy;
float lambda = sqrtf (xl * xl + yl * yl + 1);
scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters
}
__global__ void
tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
__global__ void
tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
short2* pos = volume.ptr (y) + x;
int elem_step = volume.step * VOLUME_Y / sizeof(short2);
//#pragma unroll
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled,
pos += elem_step)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
bool integrate = true;
if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2))
{
const float qnan = numeric_limits<float>::quiet_NaN();
float3 normal = make_float3(qnan, qnan, qnan);
float Fn, Fp;
int Wn = 0, Wp = 0;
unpack_tsdf (*(pos + elem_step), Fn, Wn);
unpack_tsdf (*(pos - elem_step), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.z = (Fn - Fp)/cell_size.z;
unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn);
unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.y = (Fn - Fp)/cell_size.y;
unpack_tsdf (*(pos + 1), Fn, Wn);
unpack_tsdf (*(pos - 1), Fp, Wp);
if (Wn > 16 && Wp > 16)
normal.x = (Fn - Fp)/cell_size.x;
if (normal.x != qnan && normal.y != qnan && normal.z != qnan)
{
float norm2 = dot(normal, normal);
if (norm2 >= 1e-10)
{
normal *= rsqrt(norm2);
float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z;
float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z);
if (cosine < 0.5)
integrate = false;
}
}
}
if (integrate)
{
//read and unpack
float tsdf_prev;
int weight_prev;
unpack_tsdf (*pos, tsdf_prev, weight_prev);
const int Wrk = 1;
float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk);
int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT);
pack_tsdf (tsdf_new, weight_new, *pos);
}
}
}
} // for(int z = 0; z < VOLUME_Z; ++z)
} // __global__
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr,
const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr,
float tranc_dist,
PtrStep<short2> volume, DeviceArray2D<float>& depthScaled)
{
depthScaled.create (depth.rows, depth.cols);
dim3 block_scale (32, 8);
dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y));
//scales depth along ray and converts mm -> meters.
scaleDepth<<<grid_scale, block_scale>>>(depth, depthScaled, intr);
cudaSafeCall ( cudaGetLastError () );
float3 cell_size;
cell_size.x = volume_size.x / VOLUME_X;
cell_size.y = volume_size.y / VOLUME_Y;
cell_size.z = volume_size.z / VOLUME_Z;
//dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y);
dim3 block (16, 16);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
tsdf23<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size);
//tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
a6ac1f252c4e630cef51badb2116e0ffadd53781.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_computePSF_signalN2Many.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int sizePart = XSIZE*YSIZE;
int sizeTot = XSIZE*YSIZE;
double divide = 1;
int *sparseIndexEvenShiftOutput = NULL;
hipMalloc(&sparseIndexEvenShiftOutput, XSIZE*YSIZE);
int *sparseIndexOddShiftOutput = NULL;
hipMalloc(&sparseIndexOddShiftOutput, XSIZE*YSIZE);
double *fft = NULL;
hipMalloc(&fft, XSIZE*YSIZE);
int *sparseIndexEvenShiftOutputNext = NULL;
hipMalloc(&sparseIndexEvenShiftOutputNext, XSIZE*YSIZE);
int *sparseIndexOddShiftOutputNext = NULL;
hipMalloc(&sparseIndexOddShiftOutputNext, XSIZE*YSIZE);
double *psffft = NULL;
hipMalloc(&psffft, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_computePSF_signalN2Many), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizePart,sizeTot,divide,sparseIndexEvenShiftOutput,sparseIndexOddShiftOutput,fft,sparseIndexEvenShiftOutputNext,sparseIndexOddShiftOutputNext,psffft);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_computePSF_signalN2Many), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizePart,sizeTot,divide,sparseIndexEvenShiftOutput,sparseIndexOddShiftOutput,fft,sparseIndexEvenShiftOutputNext,sparseIndexOddShiftOutputNext,psffft);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_computePSF_signalN2Many), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizePart,sizeTot,divide,sparseIndexEvenShiftOutput,sparseIndexOddShiftOutput,fft,sparseIndexEvenShiftOutputNext,sparseIndexOddShiftOutputNext,psffft);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a6ac1f252c4e630cef51badb2116e0ffadd53781.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_computePSF_signalN2Many.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int sizePart = XSIZE*YSIZE;
int sizeTot = XSIZE*YSIZE;
double divide = 1;
int *sparseIndexEvenShiftOutput = NULL;
cudaMalloc(&sparseIndexEvenShiftOutput, XSIZE*YSIZE);
int *sparseIndexOddShiftOutput = NULL;
cudaMalloc(&sparseIndexOddShiftOutput, XSIZE*YSIZE);
double *fft = NULL;
cudaMalloc(&fft, XSIZE*YSIZE);
int *sparseIndexEvenShiftOutputNext = NULL;
cudaMalloc(&sparseIndexEvenShiftOutputNext, XSIZE*YSIZE);
int *sparseIndexOddShiftOutputNext = NULL;
cudaMalloc(&sparseIndexOddShiftOutputNext, XSIZE*YSIZE);
double *psffft = NULL;
cudaMalloc(&psffft, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_computePSF_signalN2Many<<<gridBlock,threadBlock>>>(n,sizePart,sizeTot,divide,sparseIndexEvenShiftOutput,sparseIndexOddShiftOutput,fft,sparseIndexEvenShiftOutputNext,sparseIndexOddShiftOutputNext,psffft);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_computePSF_signalN2Many<<<gridBlock,threadBlock>>>(n,sizePart,sizeTot,divide,sparseIndexEvenShiftOutput,sparseIndexOddShiftOutput,fft,sparseIndexEvenShiftOutputNext,sparseIndexOddShiftOutputNext,psffft);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_computePSF_signalN2Many<<<gridBlock,threadBlock>>>(n,sizePart,sizeTot,divide,sparseIndexEvenShiftOutput,sparseIndexOddShiftOutput,fft,sparseIndexEvenShiftOutputNext,sparseIndexOddShiftOutputNext,psffft);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
63b2ae464ef145704c4688e1717b2c51a3df93b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <omp.h>
#include <stdio.h>
const int device = 0;
// kernel function
__global__ void my_kernel() {
//
int thread_i = threadIdx.x;
int thread_max = blockDim.x;
int block_i = blockIdx.x;
int glo_thread_i = blockDim.x * blockIdx.x + threadIdx.x;
int glo_thread_i_max = gridDim.x * blockDim.x;
printf("Hello world! I'm thread %i out of %i in block %i. My global thread id is %i out of %i.\n", thread_i,thread_max,block_i,glo_thread_i,glo_thread_i_max);
}
int main(int argc, char *argv[]) {
// Wake up GPU from power save state.
printf("Warming up device %i ... ", device); fflush(stdout);
double time = omp_get_wtime();
hipSetDevice(device); // Set the device to 0 or 1.
double *dummy_d;
hipMalloc((void**)&dummy_d, 0); // We force the creation of context on the
// device by allocating a dummy variable.
printf("time = %3.2f seconds\n", omp_get_wtime() - time);
// program
int n_blk, n_threads;
if (argc == 3 ) {
n_blk = atoi(argv[1]);
n_threads = atoi(argv[2]);
}
else {
// use default N
n_blk = 1;
n_threads = 32;
}
//
printf("n_blk %i ; n_threads %i\n",n_blk, n_threads);
hipLaunchKernelGGL(( my_kernel), dim3(n_blk),dim3(n_threads), 0, 0, );
hipDeviceSynchronize();
} | 63b2ae464ef145704c4688e1717b2c51a3df93b4.cu | #include <omp.h>
#include <stdio.h>
const int device = 0;
// kernel function
__global__ void my_kernel() {
//
int thread_i = threadIdx.x;
int thread_max = blockDim.x;
int block_i = blockIdx.x;
int glo_thread_i = blockDim.x * blockIdx.x + threadIdx.x;
int glo_thread_i_max = gridDim.x * blockDim.x;
printf("Hello world! I'm thread %i out of %i in block %i. My global thread id is %i out of %i.\n", thread_i,thread_max,block_i,glo_thread_i,glo_thread_i_max);
}
int main(int argc, char *argv[]) {
// Wake up GPU from power save state.
printf("Warming up device %i ... ", device); fflush(stdout);
double time = omp_get_wtime();
cudaSetDevice(device); // Set the device to 0 or 1.
double *dummy_d;
cudaMalloc((void**)&dummy_d, 0); // We force the creation of context on the
// device by allocating a dummy variable.
printf("time = %3.2f seconds\n", omp_get_wtime() - time);
// program
int n_blk, n_threads;
if (argc == 3 ) {
n_blk = atoi(argv[1]);
n_threads = atoi(argv[2]);
}
else {
// use default N
n_blk = 1;
n_threads = 32;
}
//
printf("n_blk %i ; n_threads %i\n",n_blk, n_threads);
my_kernel<<<n_blk,n_threads>>>();
cudaDeviceSynchronize();
} |
84ec46f09afad849e76703f2f8ffcd8a079da0fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <ATen/native/ComplexHelper.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THH/THHGeneral.h>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
// define lambda to multiply std and add mean
auto normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(rand * std + mean);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal2_double(state); },
normal_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_normal4(state); },
normal_func);
}
});
}
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
if(self.is_complex()) {
// note: float_tensor lives only as long as the self tensor lives
auto float_tensor = at::native::view_complex_as_float(self);
// variance for normal distribution of the real and imaginary values
// is half of the input variance
normal_cuda_(float_tensor, mean, std/(std::sqrt(2)), gen);
return self;
}
auto iter = TensorIterator::nullary_op(self);
normal_kernel_cuda(iter, mean, std, gen);
return self;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
normal_cuda_(output, 0, std, gen);
output.add_(mean);
return output;
}
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
normal_cuda_(output, 0, 1, gen);
auto mean_tensor = at::full({}, mean, output.options());
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
output.mul_(std).add_(mean_tensor);
return output;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
bool is_deprecated_th_impl = resize_output_for_normal(output, mean, std);
normal_cuda_(output, 0, 1, gen);
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean + mean * std instead of mean + output * std
if (is_deprecated_th_impl) {
output.mul_(std.reshape(mean.sizes())).add_(mean);
}
else {
output.mul_(std).add_(mean);
}
return output;
}
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
Tensor ret = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty_like(std, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty({0}, mean.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
}} // namespace at::native
| 84ec46f09afad849e76703f2f8ffcd8a079da0fa.cu | #include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGenerator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <ATen/native/ComplexHelper.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <THC/THCGeneral.h>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
void normal_kernel_cuda(TensorIterator& iter, double mean_, double std_, Generator* gen_) {
auto gen = get_generator_or_default<CUDAGenerator>(gen_, cuda::detail::getDefaultCUDAGenerator());
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto mean = static_cast<accscalar_t>(mean_);
auto std = static_cast<accscalar_t>(std_);
// define lambda to multiply std and add mean
auto normal_func = [mean, std] __device__ (accscalar_t rand) {
return static_cast<scalar_t>(rand * std + mean);
};
if (std::is_same<scalar_t, double>::value) {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); },
normal_func);
} else {
distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter,
gen,
[] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); },
normal_func);
}
});
}
Tensor& normal_cuda_(Tensor& self, double mean, double std, Generator* gen) {
TORCH_CHECK(std > 0.0, "normal_ expects std > 0.0, but found std=", std);
if(self.is_complex()) {
// note: float_tensor lives only as long as the self tensor lives
auto float_tensor = at::native::view_complex_as_float(self);
// variance for normal distribution of the real and imaginary values
// is half of the input variance
normal_cuda_(float_tensor, mean, std/(std::sqrt(2)), gen);
return self;
}
auto iter = TensorIterator::nullary_op(self);
normal_kernel_cuda(iter, mean, std, gen);
return self;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, double std, Generator* gen) {
normal_cuda_(output, 0, std, gen);
output.add_(mean);
return output;
}
Tensor& normal_out_cuda(Tensor& output, double mean, const Tensor& std, Generator* gen) {
normal_cuda_(output, 0, 1, gen);
auto mean_tensor = at::full({}, mean, output.options());
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
output.mul_(std).add_(mean_tensor);
return output;
}
Tensor& normal_out_cuda(Tensor& output, const Tensor& mean, const Tensor& std, Generator* gen) {
bool is_deprecated_th_impl = resize_output_for_normal(output, mean, std);
normal_cuda_(output, 0, 1, gen);
// NB: addcmul_out copies the tensor to be added into the output.
// Please look at aten/src/THC/generic/THCTensorMathPointwise.cu
// The previous function here was addcmul_out(output, mean, output, std, 1);
// The third argument is not a constant reference and hence the samples in output are overwritten.
// Consequently, the computation performed is mean + mean * std instead of mean + output * std
if (is_deprecated_th_impl) {
output.mul_(std.reshape(mean.sizes())).add_(mean);
}
else {
output.mul_(std).add_(mean);
}
return output;
}
Tensor normal_cuda(const Tensor& mean, double std, Generator* gen) {
Tensor ret = at::empty_like(mean, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(double mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty_like(std, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
Tensor normal_cuda(const Tensor& mean, const Tensor& std, Generator* gen) {
Tensor ret = at::empty({0}, mean.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
normal_out_cuda(ret, mean, std, gen);
return ret;
}
}} // namespace at::native
|
f5c10688c1008fe117eb3fa7a8d0067204066e1b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2018, Cranfield University
// All rights reserved
// Author: Salvatore Filippone
#include <iostream>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
const int VERSION=9;
// Computes the reduction using the CPU.
double Reduction(int n, const double* x) {
double result = 0.0f;
for (int i = 0; i < n; ++i) {
result += x[i] ;
}
return result;
}
// Memory management for device side
const int BLOCKS_PER_MP = 32; // Sufficiently large for memory transaction hiding
int thread_block = 0; // Must be a power of 2 >= 64
int max_blocks=0; // Blocks in a grid
int red_sz=0; // Size of reduction buffer
double *o_data=NULL, *d_res_data=NULL, *h_res_data=NULL;
static struct hipDeviceProp_t *prop=NULL;
void reduce_alloc_wrk()
{
int mpCnt;
if (prop == NULL) {
if ((prop=(struct hipDeviceProp_t *) malloc(sizeof(struct hipDeviceProp_t)))==NULL) {
fprintf(stderr,"CUDA Error gpuInit3: not malloced prop\n");
return;
}
hipSetDevice(0); // BEWARE: you may have more than one device
hipGetDeviceProperties(prop,0);
}
if (thread_block <= 0)
std::cerr << "thread_block must be a power of 2 between 64 and 1024" << std::endl;
if (max_blocks == 0) {
mpCnt = prop->multiProcessorCount;
max_blocks = mpCnt*BLOCKS_PER_MP;
// Enough to do the second-level reduction
red_sz = (max_blocks+thread_block-1)/thread_block;
//std::cerr << mpCnt << ' '<<max_blocks << ' '<<thread_block<< std::endl;
}
if (o_data == NULL) hipMalloc(&o_data,max_blocks*sizeof(double));
if (d_res_data == NULL) hipMalloc(&d_res_data,(red_sz)*sizeof(double));
if (h_res_data == NULL) h_res_data = (double *)malloc((red_sz)*sizeof(double));
}
// Fully unrolled. Assuming thread_block >= 64
__device__ void warpReduce(volatile double *sdata, int tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
template <unsigned int THD> __global__ void reduce(int n, double *g_idata, double *g_odata) {
extern __shared__ double sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
sdata[tid] = 0.0;
while (i<n) {
sdata[tid] += g_idata[i] ;
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (THD >= 1024){ if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); }
if (THD >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (THD >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (THD >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
// write result for this block to global mem
if (tid < 32) warpReduce(sdata,tid);
if (tid == 0) g_odata[blockIdx.x] += sdata[0];
}
void do_gpu_reduce(int n, double *g_idata, double *g_odata)
{
const int shmem_size = thread_block*sizeof(double);
int nblocks = ((n + thread_block - 1) / thread_block);
if (nblocks > max_blocks) nblocks = max_blocks;
switch(thread_block) {
case 1024:
hipLaunchKernelGGL(( reduce<1024>), dim3(nblocks),dim3(1024),shmem_size,0, n,g_idata,g_odata); break;
case 512:
hipLaunchKernelGGL(( reduce<512>), dim3(nblocks),dim3(512),shmem_size,0, n,g_idata,g_odata); break;
case 256:
hipLaunchKernelGGL(( reduce<256>), dim3(nblocks),dim3(256),shmem_size,0, n,g_idata,g_odata); break;
case 128:
hipLaunchKernelGGL(( reduce<128>), dim3(nblocks),dim3(128),shmem_size,0, n,g_idata,g_odata); break;
case 64:
hipLaunchKernelGGL(( reduce<64>), dim3(nblocks),dim3(64),shmem_size,0, n,g_idata,g_odata); break;
default:
std::cerr << "thread_block must be a power of 2 between 64 and 1024" << std::endl;
}
return;
}
double gpu_reduce(int n, double *d_v)
{
reduce_alloc_wrk();
hipMemset((void *) o_data, 0, max_blocks*sizeof(double));
hipMemset((void *)d_res_data,0,(red_sz)*sizeof(double));
do_gpu_reduce(n, d_v, o_data);
do_gpu_reduce(max_blocks,o_data,d_res_data);
hipError_t err = hipMemcpy(h_res_data, d_res_data,
red_sz*sizeof(double), hipMemcpyDeviceToHost);
return(Reduction(red_sz,h_res_data));
}
// Returns a random number from range [0, 1).
double rand_double() {
return static_cast<double>(rand()) / RAND_MAX;
}
int main(int argc, char** argv) {
if (argc < 3) {
std::cerr << "Usage: " <<argv[0] << " N Threads_per_block" << std::endl;
exit(1);
}
int N = atoi(argv[1]);
thread_block = atoi(argv[2]);
switch(thread_block) {
case 1024:
case 512:
case 256:
case 128:
case 64:
break;
default:
std::cerr << "thread_block must be a power of 2 between 64 and 1024" << std::endl;
exit(1);
}
double bdwdth;
double *h_x=(double *) malloc(N*sizeof(double));
double *d_x;
srand(time(0));
for (int i=0; i<N; i++)
h_x[i]=rand_double();
hipError_t err=hipMalloc((void **)&d_x,(N*sizeof(double)));
err = hipMemcpy(d_x, h_x, N*sizeof(double), hipMemcpyHostToDevice);
reduce_alloc_wrk();
StopWatchInterface* timer = 0;
sdkCreateTimer(&timer);
std::cout << "Testing reduction algorithm " << VERSION << " on a DOUBLE vector of size: " << N << std::endl;
// Calculate the reduction on the host.
timer->start();
double cpu_sum = Reduction(N, h_x);
timer->stop();
std::cout << "CPU reduction: " << cpu_sum
<< " " << timer->getTime() << " ms. " << std::endl;
// ------------ GPU reduction
timer->reset();
timer->start();
double gpu_sum = gpu_reduce(N,d_x);
timer->stop();
bdwdth = ((double)N*sizeof(double))/timer->getTime();
bdwdth *= 1.e-6;
std::cout << "GPU reduction: " << gpu_sum
<< " " << timer->getTime() << " ms. " << std::endl;
std::cout << "Relative difference: " << abs(gpu_sum-cpu_sum)/gpu_sum << std::endl;
std::cout << "Measured bandwidth: " << bdwdth << " GB/s" << std::endl;
// ------------------------------- Cleaning up ------------------------------ //
delete timer;
checkCudaErrors(hipDeviceReset());
return 0;
}
| f5c10688c1008fe117eb3fa7a8d0067204066e1b.cu | // Copyright 2018, Cranfield University
// All rights reserved
// Author: Salvatore Filippone
#include <iostream>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
const int VERSION=9;
// Computes the reduction using the CPU.
double Reduction(int n, const double* x) {
double result = 0.0f;
for (int i = 0; i < n; ++i) {
result += x[i] ;
}
return result;
}
// Memory management for device side
const int BLOCKS_PER_MP = 32; // Sufficiently large for memory transaction hiding
int thread_block = 0; // Must be a power of 2 >= 64
int max_blocks=0; // Blocks in a grid
int red_sz=0; // Size of reduction buffer
double *o_data=NULL, *d_res_data=NULL, *h_res_data=NULL;
static struct cudaDeviceProp *prop=NULL;
void reduce_alloc_wrk()
{
int mpCnt;
if (prop == NULL) {
if ((prop=(struct cudaDeviceProp *) malloc(sizeof(struct cudaDeviceProp)))==NULL) {
fprintf(stderr,"CUDA Error gpuInit3: not malloced prop\n");
return;
}
cudaSetDevice(0); // BEWARE: you may have more than one device
cudaGetDeviceProperties(prop,0);
}
if (thread_block <= 0)
std::cerr << "thread_block must be a power of 2 between 64 and 1024" << std::endl;
if (max_blocks == 0) {
mpCnt = prop->multiProcessorCount;
max_blocks = mpCnt*BLOCKS_PER_MP;
// Enough to do the second-level reduction
red_sz = (max_blocks+thread_block-1)/thread_block;
//std::cerr << mpCnt << ' '<<max_blocks << ' '<<thread_block<< std::endl;
}
if (o_data == NULL) cudaMalloc(&o_data,max_blocks*sizeof(double));
if (d_res_data == NULL) cudaMalloc(&d_res_data,(red_sz)*sizeof(double));
if (h_res_data == NULL) h_res_data = (double *)malloc((red_sz)*sizeof(double));
}
// Fully unrolled. Assuming thread_block >= 64
__device__ void warpReduce(volatile double *sdata, int tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
template <unsigned int THD> __global__ void reduce(int n, double *g_idata, double *g_odata) {
extern __shared__ double sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
sdata[tid] = 0.0;
while (i<n) {
sdata[tid] += g_idata[i] ;
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (THD >= 1024){ if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); }
if (THD >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (THD >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (THD >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
// write result for this block to global mem
if (tid < 32) warpReduce(sdata,tid);
if (tid == 0) g_odata[blockIdx.x] += sdata[0];
}
void do_gpu_reduce(int n, double *g_idata, double *g_odata)
{
const int shmem_size = thread_block*sizeof(double);
int nblocks = ((n + thread_block - 1) / thread_block);
if (nblocks > max_blocks) nblocks = max_blocks;
switch(thread_block) {
case 1024:
reduce<1024><<<nblocks,1024,shmem_size,0>>>(n,g_idata,g_odata); break;
case 512:
reduce<512><<<nblocks,512,shmem_size,0>>>(n,g_idata,g_odata); break;
case 256:
reduce<256><<<nblocks,256,shmem_size,0>>>(n,g_idata,g_odata); break;
case 128:
reduce<128><<<nblocks,128,shmem_size,0>>>(n,g_idata,g_odata); break;
case 64:
reduce<64><<<nblocks,64,shmem_size,0>>>(n,g_idata,g_odata); break;
default:
std::cerr << "thread_block must be a power of 2 between 64 and 1024" << std::endl;
}
return;
}
double gpu_reduce(int n, double *d_v)
{
reduce_alloc_wrk();
cudaMemset((void *) o_data, 0, max_blocks*sizeof(double));
cudaMemset((void *)d_res_data,0,(red_sz)*sizeof(double));
do_gpu_reduce(n, d_v, o_data);
do_gpu_reduce(max_blocks,o_data,d_res_data);
cudaError_t err = cudaMemcpy(h_res_data, d_res_data,
red_sz*sizeof(double), cudaMemcpyDeviceToHost);
return(Reduction(red_sz,h_res_data));
}
// Returns a random number from range [0, 1).
double rand_double() {
return static_cast<double>(rand()) / RAND_MAX;
}
int main(int argc, char** argv) {
if (argc < 3) {
std::cerr << "Usage: " <<argv[0] << " N Threads_per_block" << std::endl;
exit(1);
}
int N = atoi(argv[1]);
thread_block = atoi(argv[2]);
switch(thread_block) {
case 1024:
case 512:
case 256:
case 128:
case 64:
break;
default:
std::cerr << "thread_block must be a power of 2 between 64 and 1024" << std::endl;
exit(1);
}
double bdwdth;
double *h_x=(double *) malloc(N*sizeof(double));
double *d_x;
srand(time(0));
for (int i=0; i<N; i++)
h_x[i]=rand_double();
cudaError_t err=cudaMalloc((void **)&d_x,(N*sizeof(double)));
err = cudaMemcpy(d_x, h_x, N*sizeof(double), cudaMemcpyHostToDevice);
reduce_alloc_wrk();
StopWatchInterface* timer = 0;
sdkCreateTimer(&timer);
std::cout << "Testing reduction algorithm " << VERSION << " on a DOUBLE vector of size: " << N << std::endl;
// Calculate the reduction on the host.
timer->start();
double cpu_sum = Reduction(N, h_x);
timer->stop();
std::cout << "CPU reduction: " << cpu_sum
<< " " << timer->getTime() << " ms. " << std::endl;
// ------------ GPU reduction
timer->reset();
timer->start();
double gpu_sum = gpu_reduce(N,d_x);
timer->stop();
bdwdth = ((double)N*sizeof(double))/timer->getTime();
bdwdth *= 1.e-6;
std::cout << "GPU reduction: " << gpu_sum
<< " " << timer->getTime() << " ms. " << std::endl;
std::cout << "Relative difference: " << abs(gpu_sum-cpu_sum)/gpu_sum << std::endl;
std::cout << "Measured bandwidth: " << bdwdth << " GB/s" << std::endl;
// ------------------------------- Cleaning up ------------------------------ //
delete timer;
checkCudaErrors(cudaDeviceReset());
return 0;
}
|
58f812ffeb311c2cef1a80595f92f128439f5efe.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
int main(int argc, char *argv[])
{
int n = atoi(argv[1]); //TODO: atoi is an unsafe function
int nbiter = atoi(argv[2]);
float *array = new float[n];
for(int i = 0; i < n; ++i)
array[i] = 1.;
float *d_array;
hipHostMalloc((void **)&d_array, n * sizeof(float));
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
for(int iter = 0; iter < nbiter; ++iter)
hipMemcpy(d_array, array, n * sizeof(float), hipMemcpyHostToDevice);
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end - begin);
hipHostFree(d_array);
std::cout << n*sizeof(float)/1000 <<" "<< (n*sizeof(float))/(totaltime.count()*nbiter) << std::endl;
delete[] array;
return 0;
}
| 58f812ffeb311c2cef1a80595f92f128439f5efe.cu | #include <iostream>
#include <chrono>
int main(int argc, char *argv[])
{
int n = atoi(argv[1]); //TODO: atoi is an unsafe function
int nbiter = atoi(argv[2]);
float *array = new float[n];
for(int i = 0; i < n; ++i)
array[i] = 1.;
float *d_array;
cudaMallocHost((void **)&d_array, n * sizeof(float));
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
for(int iter = 0; iter < nbiter; ++iter)
cudaMemcpy(d_array, array, n * sizeof(float), cudaMemcpyHostToDevice);
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end - begin);
cudaFreeHost(d_array);
std::cout << n*sizeof(float)/1000 <<" "<< (n*sizeof(float))/(totaltime.count()*nbiter) << std::endl;
delete[] array;
return 0;
}
|
dee0609e08a86809ae4878e68edb143047e344f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
/*#include <cublasXt.h>*/
//#include <cublas_api.h>
// helper functions and utilities to work with CUDA
#include "Driver.cuh"
#include <helper_math.h>
__global__
static void batched_random_uniform_kernel(
const unsigned long seed,
const float offset,
const float scale,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
hiprandStatePhilox4_32_10_t s;
hiprand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
float4 r = hiprand_uniform4(&s) * scale + offset;
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&r.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = r;
}
}
}
}
__global__
static void batched_random_uniform_sparse_kernel(
const unsigned long seed,
const float offset,
const float scale,
const float sparsity,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
hiprandStatePhilox4_32_10_t s;
// seed a random number generator
hiprand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
auto dice = hiprand_uniform4(&s);
auto value = hiprand_uniform4(&s) * scale + offset;
value.x = dice.x < sparsity ? 0.0f : value.x;
value.y = dice.y < sparsity ? 0.0f : value.y;
value.z = dice.z < sparsity ? 0.0f : value.z;
value.w = dice.w < sparsity ? 0.0f : value.w;
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&value.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = value;
}
}
}
}
void random_uniform(const hipStream_t &stream,
const unsigned long &seed,
const float &a, const float &b, const float &sparsity,
const std::size_t &batch_size, const std::size_t &rows, const std::size_t &cols,
float **x, const std::size_t &x_stride, const bool &blank_diagonal)
{
auto scale = b - a;
auto offset = a;
dim3 grid, block;
block.x = 32;
block.y = 32;
block.z = 1;
grid.x = (cols / 4 + block.x - 1) / block.x;
grid.y = (rows + block.y - 1) / block.y;
grid.z = (batch_size + block.z - 1) / block.z;
if (sparsity > 0.0f)
{
batched_random_uniform_sparse_kernel << < grid, block, 0, stream >> > (
seed,
offset, scale, sparsity,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
else
{
batched_random_uniform_kernel << < grid, block, 0, stream >> > (
seed,
offset, scale,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
checkCudaErrors(hipGetLastError());
}
__global__
static void batched_random_gaussian_kernel(
const unsigned long seed,
const float mu,
const float sigma,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
hiprandStatePhilox4_32_10_t s;
hiprand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
float4 r = hiprand_normal4(&s) * sigma + mu;
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&r.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = r;
}
}
}
}__global__
static void batched_random_gaussian_sparse_kernel(
const unsigned long seed,
const float mu,
const float sigma,
const float sparsity,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
hiprandStatePhilox4_32_10_t s;
// seed a random number generator
hiprand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
auto dice = hiprand_uniform4(&s);
auto value = hiprand_normal4(&s) * sigma + mu;
value.x = dice.x < sparsity ? 0.0f : value.x;
value.y = dice.y < sparsity ? 0.0f : value.y;
value.z = dice.z < sparsity ? 0.0f : value.z;
value.w = dice.w < sparsity ? 0.0f : value.w;
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&value.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = value;
}
}
}
}
void random_gaussian(const hipStream_t &stream,
const unsigned long &seed,
const float &mu, const float &sigma, const float &sparsity,
const std::size_t &batch_size, const std::size_t &rows, const std::size_t &cols,
float **x, const std::size_t &x_stride, const bool &blank_diagonal)
{
dim3 grid, block;
block.x = 32;
block.y = 32;
block.z = 1;
grid.x = (cols / 4 + block.x - 1) / block.x;
grid.y = (rows + block.y - 1) / block.y;
grid.z = (batch_size + block.z - 1) / block.z;
if (sparsity > 0.0f)
{
batched_random_gaussian_sparse_kernel << < grid, block, 0, stream >> > (
seed,
mu, sigma, sparsity,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
else
{
batched_random_gaussian_kernel << < grid, block, 0, stream >> > (
seed,
mu, sigma,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
checkCudaErrors(hipGetLastError());
}
| dee0609e08a86809ae4878e68edb143047e344f1.cu | #include "stdafx.h"
#include <cuda.h>
#include <curand_kernel.h>
/*#include <cublasXt.h>*/
//#include <cublas_api.h>
// helper functions and utilities to work with CUDA
#include "Driver.cuh"
#include <helper_math.h>
__global__
static void batched_random_uniform_kernel(
const unsigned long seed,
const float offset,
const float scale,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
curandStatePhilox4_32_10_t s;
curand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
float4 r = curand_uniform4(&s) * scale + offset;
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&r.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = r;
}
}
}
}
__global__
static void batched_random_uniform_sparse_kernel(
const unsigned long seed,
const float offset,
const float scale,
const float sparsity,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
curandStatePhilox4_32_10_t s;
// seed a random number generator
curand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
auto dice = curand_uniform4(&s);
auto value = curand_uniform4(&s) * scale + offset;
value.x = dice.x < sparsity ? 0.0f : value.x;
value.y = dice.y < sparsity ? 0.0f : value.y;
value.z = dice.z < sparsity ? 0.0f : value.z;
value.w = dice.w < sparsity ? 0.0f : value.w;
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&value.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = value;
}
}
}
}
void random_uniform(const cudaStream_t &stream,
const unsigned long &seed,
const float &a, const float &b, const float &sparsity,
const std::size_t &batch_size, const std::size_t &rows, const std::size_t &cols,
float **x, const std::size_t &x_stride, const bool &blank_diagonal)
{
auto scale = b - a;
auto offset = a;
dim3 grid, block;
block.x = 32;
block.y = 32;
block.z = 1;
grid.x = (cols / 4 + block.x - 1) / block.x;
grid.y = (rows + block.y - 1) / block.y;
grid.z = (batch_size + block.z - 1) / block.z;
if (sparsity > 0.0f)
{
batched_random_uniform_sparse_kernel << < grid, block, 0, stream >> > (
seed,
offset, scale, sparsity,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
else
{
batched_random_uniform_kernel << < grid, block, 0, stream >> > (
seed,
offset, scale,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
checkCudaErrors(cudaGetLastError());
}
__global__
static void batched_random_gaussian_kernel(
const unsigned long seed,
const float mu,
const float sigma,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
curandStatePhilox4_32_10_t s;
curand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
float4 r = curand_normal4(&s) * sigma + mu;
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&r.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = r;
}
}
}
}__global__
// Sparse variant of batched_random_gaussian_kernel: each element is zeroed
// with probability `sparsity` (an independent uniform "dice" draw per lane),
// otherwise drawn from N(mu, sigma^2). Same 3D grid-stride layout and
// float4 vectorisation as the dense kernel.
static void batched_random_gaussian_sparse_kernel(
const unsigned long seed,
const float mu,
const float sigma,
const float sparsity,
const int batch_size, const int rows, const int cols,
float ** __restrict__ x, const int x_stride, bool blank_diagonal
)
{
for (int batch = blockIdx.z * blockDim.z + threadIdx.z; batch < batch_size; batch += gridDim.z * blockDim.z)
{
float *X = x[batch];
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < rows; row += gridDim.y * blockDim.y)
{
// cols >> 2: one iteration per float4 (4 columns).
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < cols >> 2; col += gridDim.x * blockDim.x)
{
curandStatePhilox4_32_10_t s;
// seed a random number generator
curand_init(seed + col * rows + row + batch * rows * cols, 0, 0, &s);
auto dice = curand_uniform4(&s);
auto value = curand_normal4(&s) * sigma + mu;
// Drop each lane independently with probability `sparsity`.
value.x = dice.x < sparsity ? 0.0f : value.x;
value.y = dice.y < sparsity ? 0.0f : value.y;
value.z = dice.z < sparsity ? 0.0f : value.z;
value.w = dice.w < sparsity ? 0.0f : value.w;
// Force the diagonal entry to zero if it is inside this float4.
if (blank_diagonal && (row >> 0x2) == col)
{
((float *)&value.x)[row & 0x3] = 0.0f;
}
reinterpret_cast<float4 *>(&X[row * x_stride])[col] = value;
}
}
}
}
// Host launcher: fill `batch_size` row-major matrices (rows x cols, leading
// dimension x_stride) with Gaussian noise on `stream`. Dispatches to the
// sparse kernel when sparsity > 0. grid.x is sized in float4 units
// (cols / 4) to match the kernels' vectorised column loop.
void random_gaussian(const cudaStream_t &stream,
const unsigned long &seed,
const float &mu, const float &sigma, const float &sparsity,
const std::size_t &batch_size, const std::size_t &rows, const std::size_t &cols,
float **x, const std::size_t &x_stride, const bool &blank_diagonal)
{
dim3 grid, block;
block.x = 32;
block.y = 32;
block.z = 1;
grid.x = (cols / 4 + block.x - 1) / block.x;
grid.y = (rows + block.y - 1) / block.y;
grid.z = (batch_size + block.z - 1) / block.z;
if (sparsity > 0.0f)
{
batched_random_gaussian_sparse_kernel << < grid, block, 0, stream >> > (
seed,
mu, sigma, sparsity,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
else
{
batched_random_gaussian_kernel << < grid, block, 0, stream >> > (
seed,
mu, sigma,
batch_size, rows, cols, x, x_stride, blank_diagonal);
}
// Surface launch-configuration errors immediately.
checkCudaErrors(cudaGetLastError());
}
|
0b20fd493f7b6080454d94ca57f5e2110881169f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "types.h"
// kNN-style denoise: one thread per pixel. Pixels in a (2k+1)^2 window are
// blended with weights exp(-(spatial_dist^2/r^2 + intensity_diff^2/h^2)).
// `input`/`output` are g.w x g.h row-major images.
__global__ void kNN_Kernel(float* input, float* output, const int k, const float r, const float h, const graphic g) {
// This thread's pixel coordinate.
int2 index = { (int)(blockDim.x * blockIdx.x + threadIdx.x), (int)(blockDim.y * blockIdx.y + threadIdx.y) };
// C accumulates the weight normaliser; knned_input the weighted sum.
float C = 0.0f, knned_input = 0.0f, importance = 0.0f;
if (index.x < g.w && index.y < g.h) {
//TODO; Corresponding border of input.
// Search window clipped to the image bounds.
for (int i = max(index.x - k, 0); i <= min(index.x + k, g.w - 1); i++) {
for (int j = max(index.y - k, 0); j <= min(index.y + k, g.h - 1); j++) {
importance = ((index.x - i) * (index.x - i) + (index.y - j) * (index.y - j)) / (r*r) +
((input[index.x + index.y * g.w] - input[i + j * g.w]) * (input[index.x + index.y * g.w] - input[i + j * g.w])) / (h*h);
knned_input += input[i + j * g.w] * expf(-max(importance, 0.0f));
C += expf(-max(importance, 0.0f));
}
}
// C >= 1: the window always contains the centre pixel (weight exp(0)).
knned_input /= C;
output[index.x + index.y * g.w] = knned_input;
}
}
// Host wrapper: kNN-denoise `input` (g.w x g.h, row-major) in place on the GPU.
// NOTE(review): block3 is passed as the *grid* and thread3 as the *block* in
// the launch below -- the names look swapped, but thread3 = ceil(dim/512)
// times a 512x512 grid still covers the whole image.
void kNNdenoise(float* input, const int k, const float r, const float h, const graphic g) {
dim3 block3 = { 512, 512, 1 }, thread3 = {g.w / block3.x + 1, g.h / block3.y + 1, 1 };
// NULL-init so the shared Error path never frees an indeterminate pointer:
// hipFree(NULL) is a safe no-op, freeing garbage is undefined behaviour.
float *dev_input = NULL, *dev_output = NULL;
hipError_t cudaStatus;
// Malloc device array on the gpu kernel.
cudaStatus = hipMalloc((void**)& dev_input, g.w * g.h * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! at kNN denoising.\n");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_output, g.w * g.h * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! at kNN denoising.\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_input, input, g.w * g.h * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
// Pre-fill the output with the input so unwritten texels stay defined.
cudaStatus = hipMemcpy(dev_output, input, g.w * g.h * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
hipLaunchKernelGGL(( kNN_Kernel) , dim3(block3), dim3(thread3) , 0, 0, dev_input, dev_output, k, r, h, g);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "kNN_Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kNN_Kernel!\n", cudaStatus);
goto Error;
}
// Denoised result replaces the caller's buffer.
cudaStatus = hipMemcpy(input, dev_output, g.w * g.h * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
Error:
hipFree(dev_input);
hipFree(dev_output);
}
// Mean squared difference between the (2w+1)^2 patches centred at p and q,
// with the offset range clipped so both patches stay inside the image.
// Fixes vs. original: (1) the upper clip used g.w - p.x / g.h - p.y, which
// permits reading one element past the last row/column; (2) the normaliser
// summed the loop bounds instead of counting iterations (end - start + 1),
// so interior pixels were divided by 1 instead of (2w+1)^2.
__device__ float ssd(const int2 p, const int2 q, const float* input, const int w, const graphic g) {
// Offsets valid for BOTH patches, clipped to [0, g.w-1] x [0, g.h-1].
int i_lo = max(-w, max(-p.x, -q.x));
int i_hi = min(w, min(g.w - 1 - p.x, g.w - 1 - q.x));
int j_lo = max(-w, max(-p.y, -q.y));
int j_hi = min(w, min(g.h - 1 - p.y, g.h - 1 - q.y));
float toReturn = 0.0f;
for (int i = i_lo; i <= i_hi; i++) {
for (int j = j_lo; j <= j_hi; j++) {
float d = input[i + p.x + (p.y + j) * g.w] - input[i + q.x + (q.y + j) * g.w];
toReturn += d * d;
}
}
// Number of samples actually visited.
float S = (float)((i_hi - i_lo + 1) * (j_hi - j_lo + 1));
return toReturn / S;
}
// Squared Euclidean distance between two integer pixel coordinates.
__forceinline__ __device__ float norm2(int2 p, int2 q) {
	int dx = p.x - q.x;
	int dy = p.y - q.y;
	return (float)(dx * dx + dy * dy);
}
// Non-local means: one thread per pixel; each window pixel is weighted by
// exp(-max(ssd/h^2 + 2*sigma^2, 0)) of its patch distance to the centre.
// NOTE(review): classic NLM uses max(ssd - 2*sigma^2, 0)/h^2 -- the `+`
// sign and placement of the 2*sigma^2 term here look suspect; confirm
// against the NLM reference.
__global__ void NLM_Kernel(float* input, float* output, const int k, const int w, const float sigma, const float h, const graphic g) {
int2 index = { (int)(blockDim.x * blockIdx.x + threadIdx.x), (int)(blockDim.y * blockIdx.y + threadIdx.y) };
// Z = weight normaliser; denoised_input = weighted sum.
float Z = 0.0f, denoised_input = 0.0f, weight = 0.0f;
if (index.x < g.w && index.y < g.h) {
// Search window of radius k, clipped to the image.
for (int i = max(index.x - k, 0); i <= min(index.x + k, g.w - 1); i++) {
for (int j = max(index.y - k, 0); j <= min(index.y + k, g.h - 1); j++) {
weight = expf(-max(ssd(index, int2{ i, j }, input, w, g) / (h * h) + 2 * (sigma * sigma), 0.0f));
denoised_input += input[i + j * g.w] * weight;
Z += weight;
}
}
denoised_input /= Z;
output[index.x + index.y * g.w] = denoised_input;
}
}
// Host wrapper: NLM-denoise `input` (g.w x g.h, row-major) in place on the GPU.
// NOTE(review): block3 is passed as the *grid* and thread3 as the *block*
// below -- names look swapped, but the whole image is still covered.
void NLMdenoise(float* input, const int k, const int w, const float sigma, const float h, const graphic g) {
dim3 block3 = { 512, 512, 1 }, thread3 = { g.w / block3.x + 1, g.h / block3.y + 1, 1 };
// NULL-init so the shared Error path never frees an indeterminate pointer
// (hipFree(NULL) is a safe no-op; freeing garbage is undefined behaviour).
float* dev_input = NULL, * dev_output = NULL;
hipError_t cudaStatus;
// Malloc device array on the gpu kernel.
cudaStatus = hipMalloc((void**)& dev_input, g.w * g.h * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! at kNN denoising.\n");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_output, g.w * g.h * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! at kNN denoising.\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_input, input, g.w * g.h * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
// Pre-fill the output with the input so unwritten texels stay defined.
cudaStatus = hipMemcpy(dev_output, input, g.w * g.h * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
hipLaunchKernelGGL(( NLM_Kernel) , dim3(block3), dim3(thread3) , 0, 0, dev_input, dev_output, k, w, sigma, h, g);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "kNN_Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kNN_Kernel!\n", cudaStatus);
goto Error;
}
// Denoised result replaces the caller's buffer.
cudaStatus = hipMemcpy(input, dev_output, g.w * g.h * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
Error:
hipFree(dev_input);
hipFree(dev_output);
}
// Tukey biweight attenuation: (1 - (d/h)^2)^2 / 2 for 0 < d <= h.
// The original expression `1 - x*1 - x` (x = d^2/h^2) lost the parentheses
// around the squared term, yielding (1 - 2x)/2, which can go negative;
// the squared form is restored here.
// NOTE(review): returning 1.0 for d > h (no attenuation of distant patches)
// is kept from the original, but the canonical biweight is 0 there -- verify.
__device__ float Turkey_bi_weight(const float d, const float h) {
	return (0.0f < d && d <= h) ? ((1.0f - ((d * d) / (h * h))) * (1.0f - ((d * d) / (h * h)))) / 2.0f : 1.0f;
}
// Range (intensity) weight: Gaussian in the intensity difference, bandwidth s.
__device__ float Wr(const float up, const float uq, const float s) {
	float diff = up - uq;
	float arg = diff * diff / (2 * s * s);
	return expf(-max(arg, 0.0f));
}
// Spatial weight: Gaussian falloff in squared pixel distance, bandwidth r.
__device__ float Ws(const int2 p, const int2 q, const float r) {
return expf(-max(norm2(p, q)/(2 * r * r), 0.0f));
}
// "Improved" NLM: the patch-distance weight is further multiplied by a Tukey
// biweight term, a range (intensity) weight Wr and a spatial weight Ws.
// NOTE(review): the exponent uses `dij/(h*h) + 2*h*h`; classic NLM subtracts
// a noise-variance term instead -- the sign/term looks suspect, confirm
// against the reference formulation.
__global__ void improvedNLM_Kernel(float* input, float* output, const int k, const int w, const float h, const float s, const float r, const graphic g) {
int2 index = { (int)(blockDim.x * blockIdx.x + threadIdx.x), (int)(blockDim.y * blockIdx.y + threadIdx.y) };
// Z = weight normaliser; denoised_input = weighted sum; dij = patch distance.
float Z = 0.0f, denoised_input = 0.0f, weight = 0.0f, dij = 0.0f;
if (index.x < g.w && index.y < g.h) {
// Search window of radius k, clipped to the image.
for (int i = max(index.x - k, 0); i <= min(index.x + k, g.w - 1); i++) {
for (int j = max(index.y - k, 0); j <= min(index.y + k, g.h - 1); j++) {
dij = ssd(index, int2{ i, j }, input, w, g);
weight = expf(-max((dij / (h * h)) + 2 * h * h, 0.0f)) * Turkey_bi_weight(dij, h) * Wr(input[index.x + index.y * g.w], input[i + j * g.w], r) * Ws(index, int2{ i, j }, s);
denoised_input += input[i + j * g.w] * weight;
Z += weight;
}
}
denoised_input /= Z;
output[index.x + index.y * g.w] = denoised_input;
}
}
// Host wrapper: improved-NLM denoise `input` (g.w x g.h, row-major) in place.
// NOTE(review): block3 is used as the *grid* and thread3 as the *block*
// below -- names look swapped, but the whole image is still covered.
void improvedNLMdenoise(float* input, const int k, const int w, const float h, const float s, const float r, const graphic g) {
dim3 block3 = { 512, 512, 1 }, thread3 = { g.w / block3.x + 1, g.h / block3.y + 1, 1 };
// NULL-init so the shared Error path never frees an indeterminate pointer
// (hipFree(NULL) is a safe no-op; freeing garbage is undefined behaviour).
float* dev_input = NULL, * dev_output = NULL;
hipError_t cudaStatus;
// Malloc device array on the gpu kernel.
cudaStatus = hipMalloc((void**)& dev_input, g.w * g.h * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! at kNN denoising.\n");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_output, g.w * g.h * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! at kNN denoising.\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_input, input, g.w * g.h * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
// Pre-fill the output with the input so unwritten texels stay defined.
cudaStatus = hipMemcpy(dev_output, input, g.w * g.h * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
improvedNLM_Kernel << <block3, thread3 >> > (dev_input, dev_output, k, w, h, s, r, g);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "kNN_Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kNN_Kernel!\n", cudaStatus);
goto Error;
}
// Denoised result replaces the caller's buffer.
cudaStatus = hipMemcpy(input, dev_output, g.w * g.h * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed at kNN denoising!\n");
goto Error;
}
Error:
hipFree(dev_input);
hipFree(dev_output);
} | 0b20fd493f7b6080454d94ca57f5e2110881169f.cu | #include "types.h"
// kNN-style denoise: one thread per pixel; pixels in a (2k+1)^2 window are
// blended with weights exp(-(spatial_dist^2/r^2 + intensity_diff^2/h^2)).
// `input`/`output` are g.w x g.h row-major images.
__global__ void kNN_Kernel(float* input, float* output, const int k, const float r, const float h, const graphic g) {
	int2 p = { (int)(blockDim.x * blockIdx.x + threadIdx.x), (int)(blockDim.y * blockIdx.y + threadIdx.y) };
	if (p.x >= g.w || p.y >= g.h)
		return;
	//TODO; Corresponding border of input.
	const float center = input[p.x + p.y * g.w];
	float norm = 0.0f;   // accumulated weights
	float acc = 0.0f;    // weighted intensity sum
	// Search window clipped to the image bounds.
	for (int i = max(p.x - k, 0); i <= min(p.x + k, g.w - 1); i++) {
		for (int j = max(p.y - k, 0); j <= min(p.y + k, g.h - 1); j++) {
			float neigh = input[i + j * g.w];
			float dist = ((p.x - i) * (p.x - i) + (p.y - j) * (p.y - j)) / (r*r) +
				((center - neigh) * (center - neigh)) / (h*h);
			float wgt = expf(-max(dist, 0.0f));
			acc += neigh * wgt;
			norm += wgt;
		}
	}
	// norm >= 1 because the window always contains the centre pixel.
	output[p.x + p.y * g.w] = acc / norm;
}
// Host wrapper: kNN-denoise `input` (g.w x g.h, row-major) in place on the GPU.
// NOTE(review): block3 is passed as the *grid* and thread3 as the *block* in
// the launch below -- the names look swapped, but the image is still covered.
void kNNdenoise(float* input, const int k, const float r, const float h, const graphic g) {
	dim3 block3 = { 512, 512, 1 }, thread3 = {g.w / block3.x + 1, g.h / block3.y + 1, 1 };
	// NULL-init so the shared Error path never frees an indeterminate pointer:
	// cudaFree(NULL) is a safe no-op, freeing garbage is undefined behaviour.
	float *dev_input = NULL, *dev_output = NULL;
	cudaError_t cudaStatus;
	// Malloc device array on the gpu kernel.
	cudaStatus = cudaMalloc((void**)& dev_input, g.w * g.h * sizeof(float));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed! at kNN denoising.\n");
	goto Error;
	}
	cudaStatus = cudaMalloc((void**)& dev_output, g.w * g.h * sizeof(float));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed! at kNN denoising.\n");
	goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(dev_input, input, g.w * g.h * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
	// Pre-fill the output with the input so unwritten texels stay defined.
	cudaStatus = cudaMemcpy(dev_output, input, g.w * g.h * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
	kNN_Kernel <<<block3, thread3 >>> (dev_input, dev_output, k, r, h, g);
	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "kNN_Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	goto Error;
	}
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kNN_Kernel!\n", cudaStatus);
	goto Error;
	}
	// Denoised result replaces the caller's buffer.
	cudaStatus = cudaMemcpy(input, dev_output, g.w * g.h * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
Error:
	cudaFree(dev_input);
	cudaFree(dev_output);
}
// Mean squared difference between the (2w+1)^2 patches centred at p and q,
// with the offset range clipped so both patches stay inside the image.
// Fixes vs. original: (1) the upper clip used g.w - p.x / g.h - p.y, which
// permits reading one element past the last row/column; (2) the normaliser
// summed the loop bounds instead of counting iterations (end - start + 1),
// so interior pixels were divided by 1 instead of (2w+1)^2.
__device__ float ssd(const int2 p, const int2 q, const float* input, const int w, const graphic g) {
	// Offsets valid for BOTH patches, clipped to [0, g.w-1] x [0, g.h-1].
	int i_lo = max(-w, max(-p.x, -q.x));
	int i_hi = min(w, min(g.w - 1 - p.x, g.w - 1 - q.x));
	int j_lo = max(-w, max(-p.y, -q.y));
	int j_hi = min(w, min(g.h - 1 - p.y, g.h - 1 - q.y));
	float toReturn = 0.0f;
	for (int i = i_lo; i <= i_hi; i++) {
		for (int j = j_lo; j <= j_hi; j++) {
			float d = input[i + p.x + (p.y + j) * g.w] - input[i + q.x + (q.y + j) * g.w];
			toReturn += d * d;
		}
	}
	// Number of samples actually visited.
	float S = (float)((i_hi - i_lo + 1) * (j_hi - j_lo + 1));
	return toReturn / S;
}
// Squared Euclidean distance between two integer pixel coordinates.
__forceinline__ __device__ float norm2(int2 p, int2 q) {
	int dx = p.x - q.x;
	int dy = p.y - q.y;
	return (float)(dx * dx + dy * dy);
}
// Non-local means: one thread per pixel; each window pixel is weighted by
// exp(-max(ssd/h^2 + 2*sigma^2, 0)) of its patch distance to the centre.
// NOTE(review): classic NLM uses max(ssd - 2*sigma^2, 0)/h^2 -- the `+`
// sign and placement of the 2*sigma^2 term here look suspect; confirm
// against the NLM reference.
__global__ void NLM_Kernel(float* input, float* output, const int k, const int w, const float sigma, const float h, const graphic g) {
	int2 index = { (int)(blockDim.x * blockIdx.x + threadIdx.x), (int)(blockDim.y * blockIdx.y + threadIdx.y) };
	// Z = weight normaliser; denoised_input = weighted sum.
	float Z = 0.0f, denoised_input = 0.0f, weight = 0.0f;
	if (index.x < g.w && index.y < g.h) {
		// Search window of radius k, clipped to the image.
		for (int i = max(index.x - k, 0); i <= min(index.x + k, g.w - 1); i++) {
			for (int j = max(index.y - k, 0); j <= min(index.y + k, g.h - 1); j++) {
				weight = expf(-max(ssd(index, int2{ i, j }, input, w, g) / (h * h) + 2 * (sigma * sigma), 0.0f));
				denoised_input += input[i + j * g.w] * weight;
				Z += weight;
			}
		}
		denoised_input /= Z;
		output[index.x + index.y * g.w] = denoised_input;
	}
}
// Host wrapper: NLM-denoise `input` (g.w x g.h, row-major) in place on the GPU.
// NOTE(review): block3 is used as the *grid* and thread3 as the *block*
// below -- names look swapped, but the whole image is still covered.
void NLMdenoise(float* input, const int k, const int w, const float sigma, const float h, const graphic g) {
	dim3 block3 = { 512, 512, 1 }, thread3 = { g.w / block3.x + 1, g.h / block3.y + 1, 1 };
	// NULL-init so the shared Error path never frees an indeterminate pointer
	// (cudaFree(NULL) is a safe no-op; freeing garbage is undefined behaviour).
	float* dev_input = NULL, * dev_output = NULL;
	cudaError_t cudaStatus;
	// Malloc device array on the gpu kernel.
	cudaStatus = cudaMalloc((void**)& dev_input, g.w * g.h * sizeof(float));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed! at kNN denoising.\n");
	goto Error;
	}
	cudaStatus = cudaMalloc((void**)& dev_output, g.w * g.h * sizeof(float));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed! at kNN denoising.\n");
	goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(dev_input, input, g.w * g.h * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
	// Pre-fill the output with the input so unwritten texels stay defined.
	cudaStatus = cudaMemcpy(dev_output, input, g.w * g.h * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
	NLM_Kernel <<<block3, thread3 >>> (dev_input, dev_output, k, w, sigma, h, g);
	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "kNN_Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	goto Error;
	}
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kNN_Kernel!\n", cudaStatus);
	goto Error;
	}
	// Denoised result replaces the caller's buffer.
	cudaStatus = cudaMemcpy(input, dev_output, g.w * g.h * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
Error:
	cudaFree(dev_input);
	cudaFree(dev_output);
}
// Tukey biweight attenuation: (1 - (d/h)^2)^2 / 2 for 0 < d <= h.
// The original expression `1 - x*1 - x` (x = d^2/h^2) lost the parentheses
// around the squared term, yielding (1 - 2x)/2, which can go negative;
// the squared form is restored here.
// NOTE(review): returning 1.0 for d > h (no attenuation of distant patches)
// is kept from the original, but the canonical biweight is 0 there -- verify.
__device__ float Turkey_bi_weight(const float d, const float h) {
	return (0.0f < d && d <= h) ? ((1.0f - ((d * d) / (h * h))) * (1.0f - ((d * d) / (h * h)))) / 2.0f : 1.0f;
}
// Range (intensity) weight: Gaussian in the intensity difference, bandwidth s.
__device__ float Wr(const float up, const float uq, const float s) {
	float diff = up - uq;
	float arg = diff * diff / (2 * s * s);
	return expf(-max(arg, 0.0f));
}
// Spatial weight: Gaussian falloff in squared pixel distance, bandwidth r.
__device__ float Ws(const int2 p, const int2 q, const float r) {
	return expf(-max(norm2(p, q)/(2 * r * r), 0.0f));
}
// "Improved" NLM: the patch-distance weight is further multiplied by a Tukey
// biweight term, a range (intensity) weight Wr and a spatial weight Ws.
// NOTE(review): the exponent uses `dij/(h*h) + 2*h*h`; classic NLM subtracts
// a noise-variance term instead -- the sign/term looks suspect, confirm
// against the reference formulation.
__global__ void improvedNLM_Kernel(float* input, float* output, const int k, const int w, const float h, const float s, const float r, const graphic g) {
	int2 index = { (int)(blockDim.x * blockIdx.x + threadIdx.x), (int)(blockDim.y * blockIdx.y + threadIdx.y) };
	// Z = weight normaliser; denoised_input = weighted sum; dij = patch distance.
	float Z = 0.0f, denoised_input = 0.0f, weight = 0.0f, dij = 0.0f;
	if (index.x < g.w && index.y < g.h) {
		// Search window of radius k, clipped to the image.
		for (int i = max(index.x - k, 0); i <= min(index.x + k, g.w - 1); i++) {
			for (int j = max(index.y - k, 0); j <= min(index.y + k, g.h - 1); j++) {
				dij = ssd(index, int2{ i, j }, input, w, g);
				weight = expf(-max((dij / (h * h)) + 2 * h * h, 0.0f)) * Turkey_bi_weight(dij, h) * Wr(input[index.x + index.y * g.w], input[i + j * g.w], r) * Ws(index, int2{ i, j }, s);
				denoised_input += input[i + j * g.w] * weight;
				Z += weight;
			}
		}
		denoised_input /= Z;
		output[index.x + index.y * g.w] = denoised_input;
	}
}
// Host wrapper: improved-NLM denoise `input` (g.w x g.h, row-major) in place.
// NOTE(review): block3 is used as the *grid* and thread3 as the *block*
// below -- names look swapped, but the whole image is still covered.
void improvedNLMdenoise(float* input, const int k, const int w, const float h, const float s, const float r, const graphic g) {
	dim3 block3 = { 512, 512, 1 }, thread3 = { g.w / block3.x + 1, g.h / block3.y + 1, 1 };
	// NULL-init so the shared Error path never frees an indeterminate pointer
	// (cudaFree(NULL) is a safe no-op; freeing garbage is undefined behaviour).
	float* dev_input = NULL, * dev_output = NULL;
	cudaError_t cudaStatus;
	// Malloc device array on the gpu kernel.
	cudaStatus = cudaMalloc((void**)& dev_input, g.w * g.h * sizeof(float));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed! at kNN denoising.\n");
	goto Error;
	}
	cudaStatus = cudaMalloc((void**)& dev_output, g.w * g.h * sizeof(float));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed! at kNN denoising.\n");
	goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(dev_input, input, g.w * g.h * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
	// Pre-fill the output with the input so unwritten texels stay defined.
	cudaStatus = cudaMemcpy(dev_output, input, g.w * g.h * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
	improvedNLM_Kernel << <block3, thread3 >> > (dev_input, dev_output, k, w, h, s, r, g);
	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "kNN_Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	goto Error;
	}
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kNN_Kernel!\n", cudaStatus);
	goto Error;
	}
	// Denoised result replaces the caller's buffer.
	cudaStatus = cudaMemcpy(input, dev_output, g.w * g.h * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed at kNN denoising!\n");
	goto Error;
	}
Error:
	cudaFree(dev_input);
	cudaFree(dev_output);
}
3707b95c62256fd758231d1053fffdc2d5cdae13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <time.h>
#define MINVAL 0.00
#define MAXVAL 10.0
#define TOL 1e-5
double CPS = 2.9e9;
//////////////////////////// CUDA RELATED ////////////////////////////////////
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error helper behind CUDA_SAFE_CALL: on failure, print the error string
// plus the call site, then (optionally) exit with the error code.
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// 2000 sweeps of successive over-relaxation on a 2048x2048 grid: each thread
// of the single 16x16 block owns a 128x128 tile (boundary ring excluded).
// FIX: the original guard used `threadIdx < 15`, which excluded index 15
// entirely and made the `== 15` edge cases below unreachable, so the last
// 128-wide strip of rows/columns was never relaxed; `< 16` admits all lanes.
// NOTE(review): tile edges read neighbour values owned by other threads with
// no synchronisation (the __syncthreads calls are commented out), so the
// relaxation is chaotic/asynchronous and results vary run to run -- confirm
// this is acceptable for the comparison against the sequential CPU version.
__global__ void SOR_kernel(float* arr, float* res, int len, float OMEGA)
{
// start with some bounds checking to be safe
if ((threadIdx.x >= 0) && (threadIdx.x < 16))
{
if ((threadIdx.y >= 0) && (threadIdx.y < 16))
{
// variables needed for SOR
int i_start, i_end, j_start, j_end;
float change = 0;
// set start point for threads (tile 0 skips the boundary row/col 0)
if (threadIdx.x == 0) i_start = 1;
else i_start = threadIdx.x * 128;
if (threadIdx.y == 0) j_start = 1;
else j_start = threadIdx.y * 128;
// set end point for threads (tile 15 stops before boundary 2047)
if (threadIdx.x == 15) i_end = 2046;
else i_end = threadIdx.x * 128 + 127;
if (threadIdx.y == 15) j_end = 2046;
else j_end = threadIdx.y * 128 + 127;
// begin the SOR this portion is responsible for
int i,j,k;
for (k = 0; k < 2000; k++) //2k iterations of SOR
{
for (i = i_start; i <= i_end; i++)
{
for (j = j_start; j <= j_end; j++)
{
change = arr[i*len+j] - 0.25 * (arr[(i-1)*len+j] + arr[(i+1)*len+j] + arr[i*len+j+1] + arr[i*len+j-1]);
//__syncthreads();
arr[i*len+j] -= change * OMEGA;
//__syncthreads();
}
}
}
// copy this tile's interior to the result buffer
for(i = i_start; i <= i_end; i++)
{
for(j = j_start; j <=j_end; j++)
{
res[i * len + j] = arr[i * len +j];
}
}
}
}
}
///////////////////////////// MATRIX STUFF ////////////////////////////////////////
float* matrix_create(int len);
int matrix_init(float* mat, int len);
int matrix_zero(float* mat, int len);
void SOR_CPU(float* mat, int len, float OMEGA);
///////////////// Time related //////////////////////////////
//rdtsc related
typedef union {
unsigned long long int64;
struct {unsigned int lo, hi;} int32;
} mcps_tctr;
#define MCPS_RDTSC(cpu_c) __asm__ __volatile__ ("rdtsc" : \
"=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi))
int clock_gettime(clockid_t clk_id, struct timespec *tp);
struct timespec diff(struct timespec start, struct timespec end);
double ts_ms(struct timespec ts);
struct timespec ts_diff(struct timespec start, struct timespec end);
double measure_cps(void);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
// Driver: run SOR on the GPU (timed with HIP events) and on the CPU (timed
// with clock_gettime), then compare interior elements within TOL.
int main(int argc, char *argv[])
{
int LEN = 2048;
int size = LEN * LEN * sizeof(float);
float OMEGA = 1.97;
// CUDA Timing
hipEvent_t start, stop;
float d_time;
//CPU timing
struct timespec time1, time2;
double h_time;
float *h_mat, *d_mat, *h_res, *d_res;
// set up matrix on host
measure_cps();
h_mat = matrix_create(LEN);
if(!h_mat) return 0;
if(!matrix_init(h_mat, LEN)) return 0;
h_res = matrix_create(LEN);
if(!h_res) return 0;
if(!matrix_zero(h_res, LEN)) return 0;
// set up device
d_mat = NULL;
d_res = NULL;
CUDA_SAFE_CALL(hipSetDevice(0));
CUDA_SAFE_CALL(hipMalloc((void**)&d_mat, size));
CUDA_SAFE_CALL(hipMalloc((void**)&d_res, size));
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy(d_mat, h_mat, size, hipMemcpyHostToDevice));
// Launch the kernel: one 16x16 block, each thread relaxing a 128x128 tile.
dim3 dimBlock(16, 16, 1);
dim3 dimGrid(1, 1, 1);
hipLaunchKernelGGL(( SOR_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_mat, d_res, LEN, OMEGA);
CUDA_SAFE_CALL(hipPeekAtLastError());
CUDA_SAFE_CALL(hipDeviceSynchronize());
// Transfer the results back to the host
CUDA_SAFE_CALL(hipMemcpy(h_res, d_res, size, hipMemcpyDeviceToHost));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&d_time, start, stop);
printf("\nGPU time: %f (msec)\n", d_time);
hipEventDestroy(start);
hipEventDestroy(stop);
// CPU SOR and comparison
clock_gettime(CLOCK_REALTIME, &time1);
SOR_CPU(h_mat, LEN, OMEGA);
clock_gettime(CLOCK_REALTIME, &time2);
h_time = ts_ms(ts_diff(time1, time2));
printf("\nCPU time: %lf (msec)\n", h_time);
// Verify interior elements only: the kernel never writes the outermost
// ring of res, so h_res boundaries still hold the zeros from matrix_zero.
// (The original compared the raw pointers `h_mat - h_res`, which tested
// nothing; compare element values against TOL instead.)
int i, j;
for(i = 1; i < LEN - 1; i++)
{
for(j = 1; j < LEN - 1; j++)
{
if(fabs(h_mat[i * LEN + j] - h_res[i * LEN + j]) > (float) TOL)
{
printf("\nResult verification failed at element %d\n", i * LEN + j);
return 0;
}
}
}
// Free stuff (d_res was previously leaked)
CUDA_SAFE_CALL(hipFree(d_mat));
CUDA_SAFE_CALL(hipFree(d_res));
free(h_res);
free(h_mat);
printf("\nDone\n");
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////// MATRIX IMPLEMENTATIONS ////////////////////////////////////////
// Uniform random float in [min, max], driven by POSIX random().
float float_rand(float min, float max)
{
	float t = (float)random() / RAND_MAX;
	return min + t * (max - min);
}
// Allocate a zero-initialised len x len float matrix.
// Returns NULL (with a message on allocation failure) for len <= 0 or OOM.
float* matrix_create(int len)
{
	if (len <= 0)
		return NULL;
	float* arr = (float*) calloc(len * len, sizeof(float));
	if (!arr)
	{
		printf("\n\tFailed to allocate array\n");
		return NULL;
	}
	return arr;
}
// Fill mat (len x len) with uniform values in [MINVAL, MAXVAL].
// Returns 1 on success, 0 (with a message) when len is not positive.
int matrix_init(float* mat, int len)
{
	if (len <= 0)
	{
		printf("\nError in initializing matrix\n");
		return 0;
	}
	int total = len * len;
	for (int i = 0; i < total; i++)
		mat[i] = float_rand(MINVAL, MAXVAL);
	return 1;
}
// Set every element of mat (len x len) to zero.
// Returns 1 on success, 0 (with a message) when len is not positive.
int matrix_zero(float* mat, int len)
{
	if (len <= 0)
	{
		printf("\nFailed to zero matrix\n");
		return 0;
	}
	int total = len * len;
	for (int i = 0; i < total; i++)
		mat[i] = 0;
	return 1;
}
// Sequential reference SOR: 2000 Gauss-Seidel-style sweeps with relaxation
// factor OMEGA over the INTERIOR of the len x len grid.
// FIX: the original looped i,j over [0, len), so mat[q_idx - len] and
// mat[q_idx - 1] read before the buffer at the first row/column and
// mat[q_idx + len] / mat[q_idx + 1] read past it at the last -- out-of-bounds
// accesses. Restricting to [1, len-2] also matches the GPU kernel, which
// leaves the boundary ring untouched.
void SOR_CPU(float* mat, int len, float OMEGA)
{
int i, j, k;
float change = 0;
int q_idx;
for(k = 0; k < 2000; k++)
{
for(i = 1; i < len - 1; i++)
{
for(j = 1; j < len - 1; j++)
{
q_idx = i * len + j;
change = mat[q_idx] - 0.25 * (mat[q_idx-len] + mat[q_idx+len] + mat[q_idx-1] +mat[q_idx+1]);
mat[q_idx] -= change * OMEGA;
}
}
}
}
///////////////////////////// Timing related ///////////////////////////////
// Convert a timespec to milliseconds (as a double).
double ts_ms(struct timespec ts)
{
	double ns = ((double)ts.tv_sec) * 1.0e9 + (double)ts.tv_nsec;
	return ns / 1.0e6;
}
/* ---------------------------------------------------------------------------
| Make the CPU busy, and measure CPS (cycles per second).
|
| Explanation:
| If tests are very fast, they can run so quickly that the SpeedStep control
| (in kernel and/or on-chip) doesn't notice in time, and the first few tests
| might finish while the CPU is still in its sleep state (about 800 MHz,
| judging from my measurements)
| A simple way to get around this is to run some kind of busy-loop that
| forces the OS and/or CPU to notice it needs to go to full clock speed.
| We print out the results of the computation so the loop won't get optimised
| away.
|
| Copy this code into other programs as desired. It provides three entry
| points:
|
| double ts_sec(ts): converts a timespec into seconds
| timespec ts_diff(ts1, ts2): computes interval between two timespecs
| measure_cps(): Does the busy loop and prints out measured CPS (cycles/sec)
--------------------------------------------------------------------------- */
// Interval end - start as a timespec, borrowing a second when the
// nanosecond difference is negative.
struct timespec ts_diff(struct timespec start, struct timespec end)
{
	struct timespec out;
	out.tv_sec = end.tv_sec - start.tv_sec;
	out.tv_nsec = end.tv_nsec - start.tv_nsec;
	if (out.tv_nsec < 0) {
		out.tv_sec -= 1;
		out.tv_nsec += 1000000000;
	}
	return out;
}
// Busy-loop calibration: spin the CPU so SpeedStep ramps to full clock,
// then estimate the clock rate from rdtsc cycles over a timed interval.
// Stores the result in the global CPS and returns it.
// NOTE(review): total_time comes from ts_ms and is in *milliseconds*, so
// CPS as computed is cycles per millisecond, not per second -- confirm the
// intended units (the global is initialised to 2.9e9, i.e. cycles/sec).
double measure_cps()
{
struct timespec cal_start, cal_end;
mcps_tctr tsc_start, tsc_end;
double total_time;
double total_cycles;
/* We perform a chaotic iteration and print the result, to defeat
compiler optimisation */
double chaosC = -1.8464323952913974; double z = 0.0;
long int i, ilim, j;
/* Do it twice and throw away results from the first time; this ensures the
* OS and CPU will notice it's busy and set the clock speed. */
for(j=0; j<2; j++) {
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_start);
MCPS_RDTSC(tsc_start);
ilim = 50*1000*1000;
for (i=0; i<ilim; i++)
z = z * z + chaosC;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_end);
MCPS_RDTSC(tsc_end);
}
// Only the second (warmed-up) pass is measured.
total_time = ts_ms(ts_diff(cal_start, cal_end));
total_cycles = (double)(tsc_end.int64-tsc_start.int64);
CPS = total_cycles / total_time;
printf("z == %f, CPS == %g\n", z, CPS);
return CPS;
}
/* ---------------------------------------------------------------------------
| End of measure_cps code
--------------------------------------------------------------------------- */
// Interval end - start as a timespec (duplicate of ts_diff, kept for the
// forward declaration above), borrowing a second when nanoseconds go negative.
struct timespec diff(struct timespec start, struct timespec end)
{
	struct timespec out;
	out.tv_sec = end.tv_sec - start.tv_sec;
	out.tv_nsec = end.tv_nsec - start.tv_nsec;
	if (out.tv_nsec < 0) {
		out.tv_sec -= 1;
		out.tv_nsec += 1000000000;
	}
	return out;
}
| 3707b95c62256fd758231d1053fffdc2d5cdae13.cu | #include <cstdio>
#include <cstdlib>
#include <math.h>
#include <time.h>
#define MINVAL 0.00
#define MAXVAL 10.0
#define TOL 1e-5
double CPS = 2.9e9;
//////////////////////////// CUDA RELATED ////////////////////////////////////
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error helper behind CUDA_SAFE_CALL: on failure, print the error string
// plus the call site, then (optionally) exit with the error code.
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// 2000 sweeps of successive over-relaxation on a 2048x2048 grid: each thread
// of the single 16x16 block owns a 128x128 tile (boundary ring excluded).
// FIX: the original guard used `threadIdx < 15`, which excluded index 15
// entirely and made the `== 15` edge cases below unreachable, so the last
// 128-wide strip of rows/columns was never relaxed; `< 16` admits all lanes.
// NOTE(review): tile edges read neighbour values owned by other threads with
// no synchronisation (the __syncthreads calls are commented out), so the
// relaxation is chaotic/asynchronous and results vary run to run -- confirm
// this is acceptable for the comparison against the sequential CPU version.
__global__ void SOR_kernel(float* arr, float* res, int len, float OMEGA)
{
// start with some bounds checking to be safe
if ((threadIdx.x >= 0) && (threadIdx.x < 16))
{
if ((threadIdx.y >= 0) && (threadIdx.y < 16))
{
// variables needed for SOR
int i_start, i_end, j_start, j_end;
float change = 0;
// set start point for threads (tile 0 skips the boundary row/col 0)
if (threadIdx.x == 0) i_start = 1;
else i_start = threadIdx.x * 128;
if (threadIdx.y == 0) j_start = 1;
else j_start = threadIdx.y * 128;
// set end point for threads (tile 15 stops before boundary 2047)
if (threadIdx.x == 15) i_end = 2046;
else i_end = threadIdx.x * 128 + 127;
if (threadIdx.y == 15) j_end = 2046;
else j_end = threadIdx.y * 128 + 127;
// begin the SOR this portion is responsible for
int i,j,k;
for (k = 0; k < 2000; k++) //2k iterations of SOR
{
for (i = i_start; i <= i_end; i++)
{
for (j = j_start; j <= j_end; j++)
{
change = arr[i*len+j] - 0.25 * (arr[(i-1)*len+j] + arr[(i+1)*len+j] + arr[i*len+j+1] + arr[i*len+j-1]);
//__syncthreads();
arr[i*len+j] -= change * OMEGA;
//__syncthreads();
}
}
}
// copy this tile's interior to the result buffer
for(i = i_start; i <= i_end; i++)
{
for(j = j_start; j <=j_end; j++)
{
res[i * len + j] = arr[i * len +j];
}
}
}
}
}
///////////////////////////// MATRIX STUFF ////////////////////////////////////////
float* matrix_create(int len);
int matrix_init(float* mat, int len);
int matrix_zero(float* mat, int len);
void SOR_CPU(float* mat, int len, float OMEGA);
///////////////// Time related //////////////////////////////
//rdtsc related
typedef union {
unsigned long long int64;
struct {unsigned int lo, hi;} int32;
} mcps_tctr;
#define MCPS_RDTSC(cpu_c) __asm__ __volatile__ ("rdtsc" : \
"=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi))
int clock_gettime(clockid_t clk_id, struct timespec *tp);
struct timespec diff(struct timespec start, struct timespec end);
double ts_ms(struct timespec ts);
struct timespec ts_diff(struct timespec start, struct timespec end);
double measure_cps(void);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
// Driver: runs SOR on the GPU (one 16x16 block over a 2048x2048 grid), then
// on the CPU, and verifies the two results element-wise against TOL.
int main(int argc, char *argv[])
{
    int LEN = 2048;
    int size = LEN * LEN * sizeof(float);
    float OMEGA = 1.97;
    // CUDA timing (events bracket copy-in + kernel + copy-out)
    cudaEvent_t start, stop;
    float d_time;
    // CPU timing
    struct timespec time1, time2;
    double h_time;
    float *h_mat, *d_mat, *h_res, *d_res;
    // set up matrices on host
    measure_cps();
    h_mat = matrix_create(LEN);
    if (!h_mat) return 0;
    if (!matrix_init(h_mat, LEN)) return 0;
    h_res = matrix_create(LEN);
    if (!h_res) return 0;
    if (!matrix_zero(h_res, LEN)) return 0;
    // set up device buffers
    d_mat = NULL;
    d_res = NULL;
    CUDA_SAFE_CALL(cudaSetDevice(0));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_mat, size));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_res, size));
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    CUDA_SAFE_CALL(cudaMemcpy(d_mat, h_mat, size, cudaMemcpyHostToDevice));
    // Launch the kernel: a single 16x16 block; each thread owns a 128x128 tile
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid(1, 1, 1);
    SOR_kernel<<<dimGrid, dimBlock>>>(d_mat, d_res, LEN, OMEGA);
    CUDA_SAFE_CALL(cudaPeekAtLastError());
    CUDA_SAFE_CALL(cudaThreadSynchronize());
    // Transfer the results back to the host
    CUDA_SAFE_CALL(cudaMemcpy(h_res, d_res, size, cudaMemcpyDeviceToHost));
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&d_time, start, stop);
    printf("\nGPU time: %f (msec)\n", d_time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // CPU SOR for comparison (typo "CPU timeL" fixed below)
    clock_gettime(CLOCK_REALTIME, &time1);
    SOR_CPU(h_mat, LEN, OMEGA);
    clock_gettime(CLOCK_REALTIME, &time2);
    h_time = ts_ms(ts_diff(time1, time2));
    printf("\nCPU time: %lf (msec)\n", h_time);
    // BUG FIX: the original compared the raw pointers (h_mat - h_res) once,
    // not the matrix elements; compare |h_mat[i] - h_res[i]| per element.
    int i, num_elements;
    num_elements = LEN * LEN;
    for (i = 0; i < num_elements; i++)
    {
        float dv = h_mat[i] - h_res[i];
        if (dv > (float) TOL || -dv > (float) TOL)
        {
            printf("\nResult verification failed at element %d\n", i);
            return 0;
        }
    }
    // Free device and host memory (the original leaked d_res and had a
    // duplicated, unreachable return statement)
    CUDA_SAFE_CALL(cudaFree(d_mat));
    CUDA_SAFE_CALL(cudaFree(d_res));
    free(h_res);
    free(h_mat);
    printf("\nDone\n");
    return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////// MATRIX IMPLEMENTATIONS ////////////////////////////////////////
// Uniform random float in [min, max], driven by the C library random().
float float_rand(float min, float max)
{
    float t = (float)random() / RAND_MAX;
    return min + t * (max - min);
}
// Allocate a zero-initialised len x len matrix of floats.
// Returns NULL (after a message on allocation failure) for a non-positive
// size or when calloc fails.
float* matrix_create(int len)
{
    if (len <= 0)
        return NULL;
    float* arr = (float*) calloc(len * len, sizeof(float));
    if (!arr)
    {
        printf("\n\tFailed to allocate array\n");
        return NULL;
    }
    return arr;
}
// Fill the len x len matrix with uniform random values in [MINVAL, MAXVAL].
// Returns 1 on success, 0 (with a message) for a non-positive size.
int matrix_init(float* mat, int len)
{
    if (len <= 0)
    {
        printf("\nError in initializing matrix\n");
        return 0;
    }
    int total = len * len;
    for (int idx = 0; idx < total; idx++)
        mat[idx] = float_rand(MINVAL, MAXVAL);
    return 1;
}
// Overwrite every element of the len x len matrix with zero.
// Returns 1 on success, 0 (with a message) for a non-positive size.
int matrix_zero(float* mat, int len)
{
    if (len <= 0)
    {
        printf("\nFailed to zero matrix\n");
        return 0;
    }
    int total = len * len;
    for (int idx = 0; idx < total; idx++)
        mat[idx] = 0;
    return 1;
}
// Reference CPU implementation: 2000 sweeps of successive over-relaxation
// with relaxation factor OMEGA over the interior of a len x len grid.
//
// BUG FIX: the original iterated i and j over [0, len), so the 4-point
// stencil (q_idx +/- 1, q_idx +/- len) read out of bounds on every border
// row and column.  Restrict the sweep to the interior (1..len-2), which is
// also the range the GPU kernel updates.
void SOR_CPU(float* mat, int len, float OMEGA)
{
    int i, j, k;
    float change = 0;
    int q_idx;
    for (k = 0; k < 2000; k++)
    {
        for (i = 1; i < len - 1; i++)
        {
            for (j = 1; j < len - 1; j++)
            {
                q_idx = i * len + j;
                // Gauss-Seidel style: updated neighbours are reused in-place
                change = mat[q_idx] - 0.25 * (mat[q_idx-len] + mat[q_idx+len] + mat[q_idx-1] + mat[q_idx+1]);
                mat[q_idx] -= change * OMEGA;
            }
        }
    }
}
///////////////////////////// Timing related ///////////////////////////////
// Convert a timespec to milliseconds as a double.
double ts_ms(struct timespec ts)
{
    double ns = ((double)ts.tv_sec) * 1.0e9 + (double)ts.tv_nsec;
    return ns / 1.0e6;
}
/* ---------------------------------------------------------------------------
| Make the CPU busy, and measure CPS (cycles per second).
|
| Explanation:
| If tests are very fast, they can run so quickly that the SpeedStep control
| (in kernel and/or on-chip) doesn't notice in time, and the first few tests
| might finish while the CPU is still in its sleep state (about 800 MHz,
| judging from my measurements)
| A simple way to get around this is to run some kind of busy-loop that
| forces the OS and/or CPU to notice it needs to go to full clock speed.
| We print out the results of the computation so the loop won't get optimised
| away.
|
| Copy this code into other programs as desired. It provides three entry
| points:
|
| double ts_sec(ts): converts a timespec into seconds
| timespec ts_diff(ts1, ts2): computes interval between two timespecs
| measure_cps(): Does the busy loop and prints out measured CPS (cycles/sec)
--------------------------------------------------------------------------- */
// Elapsed time from start to end as a normalised timespec
// (tv_nsec kept in [0, 1e9) by borrowing one second when needed).
struct timespec ts_diff(struct timespec start, struct timespec end)
{
    struct timespec out;
    out.tv_sec = end.tv_sec - start.tv_sec;
    out.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (out.tv_nsec < 0) {
        out.tv_sec -= 1;
        out.tv_nsec += 1000000000;
    }
    return out;
}
// Busy-loop calibration: spins the CPU so the clock governor reaches full
// speed, then derives a cycles-per-time ratio from rdtsc counts over a timed
// chaotic iteration.  Stores the result in the global CPS and returns it.
// NOTE(review): total_time is in *milliseconds* (ts_ms), so CPS as computed
// here is cycles per millisecond, not cycles per second -- confirm the
// intended units before relying on the value.
double measure_cps()
{
struct timespec cal_start, cal_end;
mcps_tctr tsc_start, tsc_end;
double total_time;
double total_cycles;
/* We perform a chaotic iteration and print the result, to defeat
compiler optimisation */
double chaosC = -1.8464323952913974; double z = 0.0;
long int i, ilim, j;
/* Do it twice and throw away results from the first time; this ensures the
* OS and CPU will notice it's busy and set the clock speed. */
for(j=0; j<2; j++) {
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_start);
MCPS_RDTSC(tsc_start);
ilim = 50*1000*1000;
for (i=0; i<ilim; i++)
z = z * z + chaosC;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cal_end);
MCPS_RDTSC(tsc_end);
}
// only the second (warmed-up) pass's timestamps survive the loop
total_time = ts_ms(ts_diff(cal_start, cal_end));
total_cycles = (double)(tsc_end.int64-tsc_start.int64);
CPS = total_cycles / total_time;
printf("z == %f, CPS == %g\n", z, CPS);
return CPS;
}
/* ---------------------------------------------------------------------------
| End of measure_cps code
--------------------------------------------------------------------------- */
// Interval end - start as a normalised timespec (same logic as ts_diff).
struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec out;
    long nsec = end.tv_nsec - start.tv_nsec;
    if (nsec < 0) {
        out.tv_sec = end.tv_sec - start.tv_sec - 1;
        out.tv_nsec = 1000000000 + nsec;
    } else {
        out.tv_sec = end.tv_sec - start.tv_sec;
        out.tv_nsec = nsec;
    }
    return out;
}
|
197c75220f32c12f242676cb4ca0683d65c776c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHReduce.cuh"
#include "common.h"
#include <thrust/functional.h>
// Pointwise functor for shared-weight PReLU forward:
// out = in for positive inputs, weight[0] * in otherwise.
struct PReLUUpdateOutput
{
  float* weight_;
  PReLUUpdateOutput(float* weight)
    : weight_(weight)
  {}
  __device__ __forceinline__ void operator()(float *out, float *in)
  {
    float v = *in;
    if (v > 0)
      *out = v;
    else
      *out = weight_[0] * v;
  }
};
// Forward PReLU with per-map weights: each element uses the weight of its
// feature map.  n = total elements, nElemsPerSample = elements per batch
// sample, mapSize = elements per feature map.
__global__ void preluForward(float *output, const float *input, const float *weight, int n, int nElemsPerSample, int mapSize)
{
CUDA_KERNEL_LOOP(i, n)
{
// locate this element's feature map within its sample
int positionInSample = i % nElemsPerSample;
int mapNumber = positionInSample / mapSize;
output[i] = input[i] > 0 ? input[i] : input[i] * weight[mapNumber];
}
}
// PReLU forward on CUDA tensors.
// nOutputPlane == 0: a single shared weight, applied pointwise.
// Otherwise: one weight per feature map; the input is made contiguous, a
// custom kernel resolves each element's map index, and the temporary
// contiguous tensor is freed afterwards.
void THNN_CudaPReLU_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
THCudaTensor *weight,
long nOutputPlane)
{
THCudaTensor_resizeAs(state, output, input);
float *w = THCudaTensor_data(state, weight);
if (nOutputPlane == 0)
{
THC_pointwiseApply2(state, output, input, PReLUUpdateOutput(w));
}
else
{
int ndim = THCudaTensor_nDimension(state, input);
input = THCudaTensor_newContiguous(state, input);
int n = THCudaTensor_nElement(state, input);
// map size from the trailing dims (3-D: size[1]*size[2]; 4-D: size[2]*size[3])
int mapSize = 1;
if (ndim == 3)
mapSize = (input->size[1] * input->size[2]);
else if (ndim == 4)
mapSize = (input->size[2] * input->size[3]);
int nElemsPerSample = nOutputPlane * mapSize;
hipLaunchKernelGGL(( preluForward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCudaTensor_free(state, input);
}
}
// Pointwise functor for shared-weight PReLU backward: pass gradOutput
// through for positive inputs, scale by the single shared weight otherwise.
struct PReLUUpdateGradInput
{
  float *weight_;
  PReLUUpdateGradInput(float *weight)
    : weight_(weight)
  {}
  __device__ __forceinline__ void operator()(float *gradInput, float *gradOutput, float *input)
  {
    if (*input > 0)
      *gradInput = *gradOutput;
    else
      *gradInput = *gradOutput * (*weight_);
  }
};
// Backward PReLU with per-map weights: gradInput = gradOutput where the
// input was positive, gradOutput * weight[map] elsewhere.
__global__ void preluBackward(
float *gradInput,
const float *input,
const float *weight,
const float *gradOutput,
int n, int nElemsPerSample, int mapSize)
{
CUDA_KERNEL_LOOP(i, n)
{
// locate this element's feature map within its sample
int positionInSample = i % nElemsPerSample;
int mapNumber = positionInSample / mapSize;
gradInput[i] = input[i] > 0 ? gradOutput[i] : gradOutput[i] * weight[mapNumber];
}
}
// PReLU backward (gradient w.r.t. input) on CUDA tensors.
// nOutputPlane == 0: single shared weight, pointwise apply.
// Otherwise: per-map weights; input and gradOutput are made contiguous,
// the custom kernel is launched, then the temporaries are freed.
void THNN_CudaPReLU_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *weight,
long nOutputPlane)
{
THCudaTensor_resizeAs(state, gradInput, input);
float *w = THCudaTensor_data(state, weight);
if (nOutputPlane == 0)
{
THC_pointwiseApply3(state, gradInput, gradOutput, input, PReLUUpdateGradInput(w));
}
else
{
int ndim = THCudaTensor_nDimension(state, input);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
int n = THCudaTensor_nElement(state, input);
// map size from the trailing dims (3-D: size[1]*size[2]; 4-D: size[2]*size[3])
int mapSize = 1;
if (ndim == 3)
mapSize = (input->size[1] * input->size[2]);
else if (ndim == 4)
mapSize = (input->size[2] * input->size[3]);
int nElemsPerSample = nOutputPlane * mapSize;
hipLaunchKernelGGL(( preluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, input),
w,
THCudaTensor_data(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
}
// Weight-gradient integrand for the shared-weight case: contributes
// input * gradOutput only where input <= 0 (written into gradInput,
// which the caller uses as scratch space).
struct PReLUAccGradParametersShared
{
__device__ __forceinline__ void operator()(float *gradInput, float *input, float *gradOutput)
{
*gradInput = (*input) * (*gradOutput) * (*input <= 0);
}
};
// Scaled weight-gradient integrand: input * gradOutput * scale where
// input <= 0 (written into gradInput scratch; summed per map by the caller).
struct PReLUAccGradParameters
{
float scale;
PReLUAccGradParameters(float scale)
: scale(scale)
{}
__device__ __forceinline__ void operator()(float *gradInput, float *input, float *gradOutput)
{
*gradInput = (*input) * (*gradOutput) * scale * (*input <= 0);
}
};
// 1-D case: accumulate the scaled weight gradient directly into gradWeight
// (one weight per element, so no reduction is needed).
struct PReLUAccGradParameters1to1
{
float scale;
PReLUAccGradParameters1to1(float scale)
: scale(scale)
{}
__device__ __forceinline__ void operator()(float *gradWeight, float *input, float *gradOutput)
{
*gradWeight += (*input) * (*gradOutput) * scale * (*input <= 0);
}
};
// Accumulate the PReLU weight gradient.  gradInput is borrowed as scratch
// for the per-element integrand and restored by re-running updateGradInput
// at the end.  The reduction shape depends on the input rank (2-D/3-D/4-D).
void THNN_CudaPReLU_accGradParameters(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *weight,
THCudaTensor *gradWeight,
THCudaTensor *gradWeightBuf,
THCudaTensor *gradWeightBuf2,
long nOutputPlane,
float scale)
{
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 0)
{
// single shared weight: reduce the whole integrand to one scalar
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParametersShared());
// introduces a sync point
float sum = THCudaTensor_sumall(state, gradInput);
float w = THCudaTensor_get1d(state, gradWeight, 0);
THCudaTensor_set1d(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_CudaPReLU_updateGradInput(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
else
{
int ndim = THCudaTensor_nDimension(state, input);
if (ndim == 1)
{
// one weight per element: accumulate in place, no reduction
THC_pointwiseApply3(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1(scale));
}
else
{
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParameters(scale));
THCudaTensor *sumbuf = gradWeightBuf2;
THCudaTensor_resizeAs(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
// sum over the batch dimension
THCudaTensor_sum(state, gradWeightBuf, gradInput, 0);
THCudaTensor_cadd(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else if (ndim == 3)
{
// flatten spatial dims, then sum them per plane
THCudaTensor *buffer = THCudaTensor_newContiguous(state, gradInput);
THCudaTensor_resize2d(state, buffer, nOutputPlane, input->size[1] * input->size[2]);
THCudaTensor_sum(state, gradWeightBuf, buffer, 1);
THCudaTensor_cadd(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCudaTensor_free(state, buffer);
}
else if (ndim == 4)
{
// sum spatial dims, then the batch dim, per plane
THCudaTensor *buffer = THCudaTensor_newContiguous(state, gradInput);
THCudaTensor_resize3d(state, buffer, input->size[0], nOutputPlane, input->size[2] * input->size[3]);
THCudaTensor_resize2d(state, sumbuf, input->size[0], nOutputPlane);
THCudaTensor_sum(state, sumbuf, buffer, 2);
THCudaTensor_sum(state, gradWeightBuf, sumbuf, 0);
THCudaTensor_cadd(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCudaTensor_free(state, buffer);
}
// restore gradInput
THNN_CudaPReLU_updateGradInput(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
}
}
| 197c75220f32c12f242676cb4ca0683d65c776c4.cu | #include "THCUNN.h"
#include "THCReduce.cuh"
#include "common.h"
#include <thrust/functional.h>
// Pointwise functor for shared-weight PReLU forward:
// out = in for positive inputs, weight[0] * in otherwise.
struct PReLUUpdateOutput
{
float* weight_;
PReLUUpdateOutput(float* weight)
: weight_(weight)
{}
__device__ __forceinline__ void operator()(float *out, float *in)
{
float x = *in;
*out = (x > 0) ? x : weight_[0] * x;
}
};
// Forward PReLU with per-map weights: each element uses the weight of its
// feature map (n elements total; nElemsPerSample per batch sample).
__global__ void preluForward(float *output, const float *input, const float *weight, int n, int nElemsPerSample, int mapSize)
{
CUDA_KERNEL_LOOP(i, n)
{
int positionInSample = i % nElemsPerSample;
int mapNumber = positionInSample / mapSize;
output[i] = input[i] > 0 ? input[i] : input[i] * weight[mapNumber];
}
}
// PReLU forward on CUDA tensors: shared weight (nOutputPlane == 0) is a
// pointwise apply; per-map weights launch the custom preluForward kernel
// over a contiguous copy of the input (freed afterwards).
void THNN_CudaPReLU_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
THCudaTensor *weight,
long nOutputPlane)
{
THCudaTensor_resizeAs(state, output, input);
float *w = THCudaTensor_data(state, weight);
if (nOutputPlane == 0)
{
THC_pointwiseApply2(state, output, input, PReLUUpdateOutput(w));
}
else
{
int ndim = THCudaTensor_nDimension(state, input);
input = THCudaTensor_newContiguous(state, input);
int n = THCudaTensor_nElement(state, input);
// map size from trailing dims (3-D: size[1]*size[2]; 4-D: size[2]*size[3])
int mapSize = 1;
if (ndim == 3)
mapSize = (input->size[1] * input->size[2]);
else if (ndim == 4)
mapSize = (input->size[2] * input->size[3]);
int nElemsPerSample = nOutputPlane * mapSize;
preluForward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCudaTensor_free(state, input);
}
}
// Pointwise functor for shared-weight PReLU backward: pass gradOutput
// through for positive inputs, scale by the shared weight otherwise.
struct PReLUUpdateGradInput
{
float *weight_;
PReLUUpdateGradInput(float *weight)
: weight_(weight)
{}
__device__ __forceinline__ void operator()(float *gradInput, float *gradOutput, float *input)
{
*gradInput = *input > 0 ? *gradOutput : *gradOutput * *weight_;
}
};
// Backward PReLU with per-map weights: gradInput = gradOutput where the
// input was positive, gradOutput * weight[map] elsewhere.
__global__ void preluBackward(
float *gradInput,
const float *input,
const float *weight,
const float *gradOutput,
int n, int nElemsPerSample, int mapSize)
{
CUDA_KERNEL_LOOP(i, n)
{
int positionInSample = i % nElemsPerSample;
int mapNumber = positionInSample / mapSize;
gradInput[i] = input[i] > 0 ? gradOutput[i] : gradOutput[i] * weight[mapNumber];
}
}
// PReLU backward (gradient w.r.t. input): shared weight is a pointwise
// apply; per-map weights launch preluBackward over contiguous copies of
// input and gradOutput (both freed afterwards).
void THNN_CudaPReLU_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *weight,
long nOutputPlane)
{
THCudaTensor_resizeAs(state, gradInput, input);
float *w = THCudaTensor_data(state, weight);
if (nOutputPlane == 0)
{
THC_pointwiseApply3(state, gradInput, gradOutput, input, PReLUUpdateGradInput(w));
}
else
{
int ndim = THCudaTensor_nDimension(state, input);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
int n = THCudaTensor_nElement(state, input);
// map size from trailing dims (3-D: size[1]*size[2]; 4-D: size[2]*size[3])
int mapSize = 1;
if (ndim == 3)
mapSize = (input->size[1] * input->size[2]);
else if (ndim == 4)
mapSize = (input->size[2] * input->size[3]);
int nElemsPerSample = nOutputPlane * mapSize;
preluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, input),
w,
THCudaTensor_data(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
}
// Weight-gradient integrand for the shared-weight case: contributes
// input * gradOutput only where input <= 0 (written into gradInput scratch).
struct PReLUAccGradParametersShared
{
__device__ __forceinline__ void operator()(float *gradInput, float *input, float *gradOutput)
{
*gradInput = (*input) * (*gradOutput) * (*input <= 0);
}
};
// Scaled weight-gradient integrand: input * gradOutput * scale where
// input <= 0 (written into gradInput scratch; reduced per map by the caller).
struct PReLUAccGradParameters
{
float scale;
PReLUAccGradParameters(float scale)
: scale(scale)
{}
__device__ __forceinline__ void operator()(float *gradInput, float *input, float *gradOutput)
{
*gradInput = (*input) * (*gradOutput) * scale * (*input <= 0);
}
};
// 1-D case: accumulate the scaled weight gradient directly into gradWeight
// (one weight per element, so no reduction is needed).
struct PReLUAccGradParameters1to1
{
float scale;
PReLUAccGradParameters1to1(float scale)
: scale(scale)
{}
__device__ __forceinline__ void operator()(float *gradWeight, float *input, float *gradOutput)
{
*gradWeight += (*input) * (*gradOutput) * scale * (*input <= 0);
}
};
// Accumulate the PReLU weight gradient.  gradInput is borrowed as scratch
// for the per-element integrand and restored by re-running updateGradInput
// at the end.  The reduction shape depends on the input rank (2-D/3-D/4-D).
void THNN_CudaPReLU_accGradParameters(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *weight,
THCudaTensor *gradWeight,
THCudaTensor *gradWeightBuf,
THCudaTensor *gradWeightBuf2,
long nOutputPlane,
float scale)
{
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 0)
{
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParametersShared());
// introduces a sync point
float sum = THCudaTensor_sumall(state, gradInput);
float w = THCudaTensor_get1d(state, gradWeight, 0);
THCudaTensor_set1d(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_CudaPReLU_updateGradInput(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
else
{
int ndim = THCudaTensor_nDimension(state, input);
if (ndim == 1)
{
// one weight per element: accumulate in place, no reduction
THC_pointwiseApply3(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1(scale));
}
else
{
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParameters(scale));
THCudaTensor *sumbuf = gradWeightBuf2;
THCudaTensor_resizeAs(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
// sum over the batch dimension
THCudaTensor_sum(state, gradWeightBuf, gradInput, 0);
THCudaTensor_cadd(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else if (ndim == 3)
{
// flatten spatial dims, then sum them per plane
THCudaTensor *buffer = THCudaTensor_newContiguous(state, gradInput);
THCudaTensor_resize2d(state, buffer, nOutputPlane, input->size[1] * input->size[2]);
THCudaTensor_sum(state, gradWeightBuf, buffer, 1);
THCudaTensor_cadd(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCudaTensor_free(state, buffer);
}
else if (ndim == 4)
{
// sum spatial dims, then the batch dim, per plane
THCudaTensor *buffer = THCudaTensor_newContiguous(state, gradInput);
THCudaTensor_resize3d(state, buffer, input->size[0], nOutputPlane, input->size[2] * input->size[3]);
THCudaTensor_resize2d(state, sumbuf, input->size[0], nOutputPlane);
THCudaTensor_sum(state, sumbuf, buffer, 2);
THCudaTensor_sum(state, gradWeightBuf, sumbuf, 0);
THCudaTensor_cadd(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCudaTensor_free(state, buffer);
}
// restore gradInput
THNN_CudaPReLU_updateGradInput(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
}
}
|
b45c87290240179da01b8f74f476130337df9589.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
// Abort the process with a diagnostic if a HIP API call failed.
static void HandleError(hipError_t err,
	const char* file,
	int line) {
	if (err == hipSuccess)
		return;
	printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
	exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err) (HandleError( err, __FILE__, __LINE__ ))
//
// Kernel: compute a + b on the device and store the sum in *c.
__global__ void add(int a,int b, int *c)
{
*c = a + b;
}
// Launch add<<<1,1>>> on 2 and 7, copy the result back to the host and print it.
int main()
{
int c;
int* dev_c;
// device buffer for the single-int result
HANDLE_ERROR(hipMalloc((void**)&dev_c, sizeof(int)));
add << <1, 1 >> > (2, 7, dev_c);
// blocking copy also synchronizes with the kernel
HANDLE_ERROR(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
printf("2+7=%d\n", c);
hipFree(dev_c);
return 0;
}
#include "cuda_runtime.h"
#include <iostream>
// Abort the process with a diagnostic if a CUDA API call failed.
static void HandleError(cudaError_t err,
	const char* file,
	int line) {
	if (err != cudaSuccess) {
		printf("%s in %s at line %d\n", cudaGetErrorString(err),
			file, line);
		exit(EXIT_FAILURE);
	}
}
#define HANDLE_ERROR(err) (HandleError( err, __FILE__, __LINE__ ))
// Write the kernel here
// Kernel: compute a + b on the device and store the sum in *c.
__global__ void add(int a,int b, int *c)
{
*c = a + b;
}
// Launch add<<<1,1>>> on 2 and 7, copy the result back to the host and print it.
int main()
{
int c;
int* dev_c;
// device buffer for the single-int result
HANDLE_ERROR(cudaMalloc((void**)&dev_c, sizeof(int)));
add << <1, 1 >> > (2, 7, dev_c);
// blocking copy also synchronizes with the kernel
HANDLE_ERROR(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
printf("2+7=%d\n", c);
cudaFree(dev_c);
return 0;
}
6b445013328ab0704a045c60835afdeeba46bde1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "common/CpuTimer.h"
#include "common/Error.h"
#include "common/GpuTimer.h"
#include "common/Matrix.h"
#include "common/Utilities.h"
#define N 1024
#define K 16
// Assert that h_b is exactly the transpose of h_a (element-by-element).
void compareResults(Matrix<int> h_a, Matrix<int> h_b) {
    for (int col = 0; col < h_a.width; col++) {
        for (int row = 0; row < h_a.height; row++) {
            assert(h_a.elements[row * h_a.width + col] ==
                   h_b.elements[col * h_b.width + row]);
        }
    }
}
// Kernel v3 using K threads and N/K blocks
// Try this example with 8, 16 and 32 threads by block
// Kernel v3: one thread per output element; thread (i,j) writes
// B(i,j) = A(j,i).  Assumes the 2-D grid exactly tiles the N x N matrix
// (there is no bounds check).
__global__ void transposedMatrixKernel_threads_blocks(Matrix<int> d_a,
Matrix<int> d_b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
d_b.setElement(i, j, d_a.getElement(j, i));
}
// Kernel v2 using the max number of threads in 1 block
// Kernel v2: a single block of THREADS threads; thread t transposes
// columns t, t+THREADS, t+2*THREADS, ... of the N x N matrix.
//
// BUG FIX: the original initialised j only once before the outer loop, so
// after the first column j stayed at N and the inner loop never ran again --
// each thread actually transposed only its first column.  j must restart at
// 0 for every column (compare kernel v1, which resets j inside its loop).
__global__ void transposedMatrixKernel_threads(Matrix<int> d_a,
                                               Matrix<int> d_b,
                                               int THREADS) {
    int i = threadIdx.x;
    while (i < N) {
        for (int j = 0; j < N; j++) {
            d_b.setElement(i, j, d_a.getElement(j, i));
        }
        i += THREADS;
    }
}
// Kernel v1 using 1 thread and 1 block
// Kernel v1: fully serial baseline -- a single thread walks the whole
// matrix and writes B(i,j) = A(j,i).  Launch with <<<1, 1>>>.
__global__ void transposedMatrixKernel(Matrix<int> d_a, Matrix<int> d_b) {
int i = 0;
int j = 0;
while (i < d_a.width) {
// reset the row index for every column (unlike kernel v2)
j = 0;
while (j < d_a.height) {
d_b.setElement(i, j, d_a.getElement(j, i));
j++;
}
i++;
}
}
// Host function
// CPU reference transpose: d_b(i,j) = d_a(j,i), timed with CpuTimer and
// reported in milliseconds.
void transposedMatrixHost(Matrix<int> d_a, Matrix<int> d_b) {
// start timer
CpuTimer timer;
timer.Start();
int i, j;
for (i = 0; i < d_a.width; i++) {
for (j = 0; j < d_a.height; j++) {
d_b.setElement(i, j, d_a.getElement(j, i));
}
}
// stop timer
timer.Stop();
// print time
printf("Time Host: %f ms\n", timer.Elapsed());
}
// Run the three transpose kernels (serial, single-block multi-thread,
// full grid) on the GPU, timing each, copying the result back and
// verifying it against the host input after every variant.
// NOTE(review): variant [2] has no HANDLER_ERROR_MSG check after its
// launch, unlike [1] and [3] -- confirm whether that is intentional.
void onDevice(Matrix<int> h_a, Matrix<int> h_b) {
// declare GPU data
Matrix<int> d_a, d_b;
d_a.width = h_a.width;
d_a.height = h_a.height;
d_b.width = h_b.width;
d_b.height = h_b.height;
const int ARRAY_BYTES = d_a.width * d_a.height * sizeof(int);
// allocate memory on the GPU
HANDLER_ERROR_ERR(hipMalloc((void**)&d_a.elements, ARRAY_BYTES));
HANDLER_ERROR_ERR(hipMalloc((void**)&d_b.elements, ARRAY_BYTES));
// copy data from CPU the GPU
HANDLER_ERROR_ERR(hipMemcpy(d_a.elements, h_a.elements, ARRAY_BYTES,
hipMemcpyHostToDevice));
HANDLER_ERROR_ERR(hipMemcpy(d_b.elements, h_b.elements, ARRAY_BYTES,
hipMemcpyHostToDevice));
GpuTimer timer;
// -*- [1] -*- serial kernel, 1 thread
timer.Start();
hipLaunchKernelGGL(( transposedMatrixKernel), dim3(1), dim3(1), 0, 0, d_a, d_b);
HANDLER_ERROR_MSG("kernel panic!!!");
timer.Stop();
printf("Time Device serial: %f ms\n", timer.Elapsed());
bandwidth(N, timer.Elapsed());
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(hipMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES,
hipMemcpyDeviceToHost));
compareResults(h_a, h_b);
// -*- [2] -*- one block with the device's max threads per block
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
int THREADS = prop.maxThreadsPerBlock;
timer.Start();
hipLaunchKernelGGL(( transposedMatrixKernel_threads), dim3(1), dim3(THREADS), 0, 0, d_a, d_b, THREADS);
timer.Stop();
printf("Time Device threads: %f ms\n", timer.Elapsed());
bandwidth(N, timer.Elapsed());
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(hipMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES,
hipMemcpyDeviceToHost));
compareResults(h_a, h_b);
// -*- [3] -*- full grid of KxK blocks, one thread per element
timer.Start();
dim3 GridBlocks(N / K, N / K);
dim3 ThreadsBlocks(K, K);
hipLaunchKernelGGL(( transposedMatrixKernel_threads_blocks), dim3(GridBlocks), dim3(ThreadsBlocks), 0, 0, d_a,
d_b);
HANDLER_ERROR_MSG("kernel panic!!!");
timer.Stop();
printf("Time Device threads and blocks: %f ms\n", timer.Elapsed());
bandwidth(N, timer.Elapsed());
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(hipMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES,
hipMemcpyDeviceToHost));
compareResults(h_a, h_b);
// free GPU memory
HANDLER_ERROR_ERR(hipFree(d_a.elements));
HANDLER_ERROR_ERR(hipFree(d_b.elements));
}
// Run the CPU transpose and verify the result against the input.
void test(Matrix<int> h_a, Matrix<int> h_b) {
transposedMatrixHost(h_a, h_b);
compareResults(h_a, h_b);
}
// Allocate three N x N host matrices, fill h_a with sequential values and
// zero h_b, run the CPU test, then the GPU variants (into h_c).
// NOTE(review): the host buffers are never freed before return -- harmless
// at process exit but worth confirming.
void onHost() {
Matrix<int> h_a, h_b, h_c;
h_a.width = N;
h_a.height = N;
h_b.width = N;
h_b.height = N;
h_c.width = N;
h_c.height = N;
h_a.elements = (int*)malloc(h_a.width * h_b.height * sizeof(int));
h_b.elements = (int*)malloc(h_b.width * h_b.height * sizeof(int));
h_c.elements = (int*)malloc(h_b.width * h_b.height * sizeof(int));
int i, j, k = 0;
for (i = 0; i < h_a.width; i++) {
for (j = 0; j < h_a.height; j++) {
h_a.elements[j * h_a.width + i] = k;
h_b.elements[j * h_b.width + i] = 0;
k++;
}
}
// call host function
test(h_a, h_b);
// call device configuration
onDevice(h_a, h_c);
printf("-: successful execution :-\n");
}
// Entry point: all work happens in onHost().
int main() {
onHost();
}
| 6b445013328ab0704a045c60835afdeeba46bde1.cu | #include <assert.h>
#include <stdio.h>
#include "common/CpuTimer.h"
#include "common/Error.h"
#include "common/GpuTimer.h"
#include "common/Matrix.h"
#include "common/Utilities.h"
#define N 1024
#define K 16
// Assert that h_b is exactly the transpose of h_a (element-by-element).
void compareResults(Matrix<int> h_a, Matrix<int> h_b) {
int i, j;
for (i = 0; i < h_a.width; i++) {
for (j = 0; j < h_a.height; j++) {
assert(h_a.elements[j * h_a.width + i] ==
h_b.elements[i * h_b.width + j]);
}
}
}
// Kernel v3 using K threads and N/K blocks
// Try this example with 8, 16 and 32 threads by block
// Kernel v3: one thread per output element; thread (i,j) writes
// B(i,j) = A(j,i).  Assumes the 2-D grid exactly tiles the N x N matrix
// (there is no bounds check).
__global__ void transposedMatrixKernel_threads_blocks(Matrix<int> d_a,
Matrix<int> d_b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
d_b.setElement(i, j, d_a.getElement(j, i));
}
// Kernel v2 using the max number of threads in 1 block
// Kernel v2: a single block of THREADS threads; thread t transposes
// columns t, t+THREADS, t+2*THREADS, ... of the N x N matrix.
//
// BUG FIX: the original initialised j only once before the outer loop, so
// after the first column j stayed at N and the inner loop never ran again --
// each thread actually transposed only its first column.  j must restart at
// 0 for every column (compare kernel v1, which resets j inside its loop).
__global__ void transposedMatrixKernel_threads(Matrix<int> d_a,
                                               Matrix<int> d_b,
                                               int THREADS) {
    int i = threadIdx.x;
    while (i < N) {
        for (int j = 0; j < N; j++) {
            d_b.setElement(i, j, d_a.getElement(j, i));
        }
        i += THREADS;
    }
}
// Kernel v1 using 1 thread and 1 block
// Kernel v1: fully serial baseline -- a single thread walks the whole
// matrix and writes B(i,j) = A(j,i).  Launch with <<<1, 1>>>.
__global__ void transposedMatrixKernel(Matrix<int> d_a, Matrix<int> d_b) {
int i = 0;
int j = 0;
while (i < d_a.width) {
// reset the row index for every column (unlike kernel v2)
j = 0;
while (j < d_a.height) {
d_b.setElement(i, j, d_a.getElement(j, i));
j++;
}
i++;
}
}
// Host function
// CPU reference transpose: d_b(i,j) = d_a(j,i), timed with CpuTimer and
// reported in milliseconds.
void transposedMatrixHost(Matrix<int> d_a, Matrix<int> d_b) {
// start timer
CpuTimer timer;
timer.Start();
int i, j;
for (i = 0; i < d_a.width; i++) {
for (j = 0; j < d_a.height; j++) {
d_b.setElement(i, j, d_a.getElement(j, i));
}
}
// stop timer
timer.Stop();
// print time
printf("Time Host: %f ms\n", timer.Elapsed());
}
// Run the three transpose kernels (serial, single-block multi-thread,
// full grid) on the GPU, timing each, copying the result back and
// verifying it against the host input after every variant.
// NOTE(review): variant [2] has no HANDLER_ERROR_MSG check after its
// launch, unlike [1] and [3] -- confirm whether that is intentional.
void onDevice(Matrix<int> h_a, Matrix<int> h_b) {
// declare GPU data
Matrix<int> d_a, d_b;
d_a.width = h_a.width;
d_a.height = h_a.height;
d_b.width = h_b.width;
d_b.height = h_b.height;
const int ARRAY_BYTES = d_a.width * d_a.height * sizeof(int);
// allocate memory on the GPU
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a.elements, ARRAY_BYTES));
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b.elements, ARRAY_BYTES));
// copy data from CPU the GPU
HANDLER_ERROR_ERR(cudaMemcpy(d_a.elements, h_a.elements, ARRAY_BYTES,
cudaMemcpyHostToDevice));
HANDLER_ERROR_ERR(cudaMemcpy(d_b.elements, h_b.elements, ARRAY_BYTES,
cudaMemcpyHostToDevice));
GpuTimer timer;
// -*- [1] -*- serial kernel, 1 thread
timer.Start();
transposedMatrixKernel<<<1, 1>>>(d_a, d_b);
HANDLER_ERROR_MSG("kernel panic!!!");
timer.Stop();
printf("Time Device serial: %f ms\n", timer.Elapsed());
bandwidth(N, timer.Elapsed());
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(cudaMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES,
cudaMemcpyDeviceToHost));
compareResults(h_a, h_b);
// -*- [2] -*- one block with the device's max threads per block
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
int THREADS = prop.maxThreadsPerBlock;
timer.Start();
transposedMatrixKernel_threads<<<1, THREADS>>>(d_a, d_b, THREADS);
timer.Stop();
printf("Time Device threads: %f ms\n", timer.Elapsed());
bandwidth(N, timer.Elapsed());
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(cudaMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES,
cudaMemcpyDeviceToHost));
compareResults(h_a, h_b);
// -*- [3] -*- full grid of KxK blocks, one thread per element
timer.Start();
dim3 GridBlocks(N / K, N / K);
dim3 ThreadsBlocks(K, K);
transposedMatrixKernel_threads_blocks<<<GridBlocks, ThreadsBlocks>>>(d_a,
d_b);
HANDLER_ERROR_MSG("kernel panic!!!");
timer.Stop();
printf("Time Device threads and blocks: %f ms\n", timer.Elapsed());
bandwidth(N, timer.Elapsed());
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(cudaMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES,
cudaMemcpyDeviceToHost));
compareResults(h_a, h_b);
// free GPU memory
HANDLER_ERROR_ERR(cudaFree(d_a.elements));
HANDLER_ERROR_ERR(cudaFree(d_b.elements));
}
// Run the CPU transpose and verify the result against the input.
void test(Matrix<int> h_a, Matrix<int> h_b) {
transposedMatrixHost(h_a, h_b);
compareResults(h_a, h_b);
}
// Allocate three N x N host matrices, fill h_a with sequential values and
// zero h_b, run the CPU test, then the GPU variants (into h_c).
// NOTE(review): the host buffers are never freed before return -- harmless
// at process exit but worth confirming.
void onHost() {
Matrix<int> h_a, h_b, h_c;
h_a.width = N;
h_a.height = N;
h_b.width = N;
h_b.height = N;
h_c.width = N;
h_c.height = N;
h_a.elements = (int*)malloc(h_a.width * h_b.height * sizeof(int));
h_b.elements = (int*)malloc(h_b.width * h_b.height * sizeof(int));
h_c.elements = (int*)malloc(h_b.width * h_b.height * sizeof(int));
int i, j, k = 0;
for (i = 0; i < h_a.width; i++) {
for (j = 0; j < h_a.height; j++) {
h_a.elements[j * h_a.width + i] = k;
h_b.elements[j * h_b.width + i] = 0;
k++;
}
}
// call host function
test(h_a, h_b);
// call device configuration
onDevice(h_a, h_c);
printf("-: successful execution :-\n");
}
// Entry point: all work happens in onHost().
int main() {
onHost();
}
|
2c13ba2745dde877276c674f87d5f91971a4c041.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// 100 sweeps of F[i][j] = F[i-1][j+1] + F[i][j+1] over a size x size matrix.
// NOTE(review): the kernel does not use threadIdx/blockIdx -- every launched
// thread performs the entire serial computation and writes the same cells,
// which is a data race unless it is launched with a single thread; confirm
// the intended launch configuration.
__global__ void matrixFunc(float *F, int size)
{
#pragma unroll 16
for(int k = 0; k < 100; k++)
#pragma unroll 16
for(int i = 1; i < size; i++)
for(int j = 0; j < size - 1; j++)
F[i * size + j] = F[(i-1) * size + j + 1] + F[i * size + j + 1];
}
// 100 sweeps of F[i][j] = F[i-1][j+1] + F[i][j+1] over a size x size matrix.
// NOTE(review): the kernel does not use threadIdx/blockIdx -- every launched
// thread performs the entire serial computation and writes the same cells,
// which is a data race unless it is launched with a single thread; confirm
// the intended launch configuration.
__global__ void matrixFunc(float *F, int size)
{
#pragma unroll 16
for(int k = 0; k < 100; k++)
#pragma unroll 16
for(int i = 1; i < size; i++)
for(int j = 0; j < size - 1; j++)
F[i * size + j] = F[(i-1) * size + j + 1] + F[i * size + j + 1];
}
11f0bd5aa0666f8e2b894a68c586e9a0ddbe09dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
//TODO: not sure if this block is needed
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/random.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a HIP error with its source location; exit with the error code
// unless abort is false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
// Read-only scene parameters shared with all renderer kernels (placed in
// constant memory as cuConstRendererParams below).
struct GlobalConstants {
SceneName sceneName;
int numCircles;
// per-circle attribute arrays
float* position;
float* velocity;
float* color;
float* radius;
// output image geometry and its pixel buffer (RGBA, 4 floats per pixel)
int imageWidth;
int imageHeight;
float invWidth;
float invHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
// kernelClearImageSnowflake -- (CUDA device code)
//
// Fill the frame buffer with the vertical white-gray gradient used as the
// background of the snowflake scenes. One thread per pixel; threads that
// fall outside the image bounds simply exit.
__global__ void kernelClearImageSnowflake() {
    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;
    int px = blockDim.x * blockIdx.x + threadIdx.x;
    int py = blockDim.y * blockIdx.y + threadIdx.y;
    if (px >= w || py >= h)
        return;
    // gradient depends only on the row: brighter toward the top of the image
    float shade = .4f + .45f * static_cast<float>(h - py) / h;
    // one float4 store writes the whole RGBA pixel in a single 16-byte
    // transaction -- more efficient than four separate fp32 stores
    float4* dst = (float4*)(&cuConstRendererParams.imageData[4 * (py * w + px)]);
    *dst = make_float4(shade, shade, shade, 1.f);
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
// kernelClearImage -- (CUDA device code)
//
// Set every pixel of the frame buffer to the given RGBA color.
// One thread per pixel; out-of-bounds threads exit early.
__global__ void kernelClearImage(float r, float g, float b, float a) {
    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;
    int px = blockDim.x * blockIdx.x + threadIdx.x;
    int py = blockDim.y * blockIdx.y + threadIdx.y;
    if (px >= w || py >= h)
        return;
    // one float4 store writes the whole RGBA pixel in a single 16-byte
    // transaction -- more efficient than four separate fp32 stores
    float4* dst = (float4*)(&cuConstRendererParams.imageData[4 * (py * w + px)]);
    *dst = make_float4(r, g, b, a);
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
// kernelAdvanceFireWorks
//
// Advance one spark of a firework by one time step. Circle layout
// (inferred from the index math -- confirm against sceneLoader):
// circles [0, NUM_FIREWORKS) are the firework centers, followed by
// NUM_SPARKS sparks per firework. One thread per circle.
__global__ void kernelAdvanceFireWorks() {
    const float dt = 1.f / 60.f;
    const float pi = 3.14159;
    const float maxDist = 0.25f;   // max spark travel before respawning on the rim
    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;
    float* radius = cuConstRendererParams.radius;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;
    if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
        return;
    }
    // determine the fire-work center/spark indices
    int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;   // which firework
    int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;  // which spark of it
    int index3i = 3 * fIdx;                            // center's float3 offset
    int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;  // == index
    int index3j = 3 * sIdx;                            // spark's float3 offset
    float cx = position[index3i];
    float cy = position[index3i+1];
    // update position
    position[index3j] += velocity[index3j] * dt;
    position[index3j+1] += velocity[index3j+1] * dt;
    // fire-work sparks
    float sx = position[index3j];
    float sy = position[index3j+1];
    // compute vector from firework-spark
    float cxsx = sx - cx;
    float cysy = sy - cy;
    // compute distance from fire-work
    float dist = sqrt(cxsx * cxsx + cysy * cysy);
    if (dist > maxDist) { // restore to starting position
        // random starting position on fire-work's rim, evenly spaced by spark id
        float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
        float sinA = sin(angle);
        float cosA = cos(angle);
        float x = cosA * radius[fIdx];
        float y = sinA * radius[fIdx];
        position[index3j] = position[index3i] + x;
        position[index3j+1] = position[index3i+1] + y;
        position[index3j+2] = 0.0f;
        // travel scaled unit length, radially outward from the center
        velocity[index3j] = cosA/5.0;
        velocity[index3j+1] = sinA/5.0;
        velocity[index3j+2] = 0.0f;
    }
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
// kernelAdvanceHypnosis
//
// Grow each circle's radius a little every frame; once it exceeds the
// threshold, snap it back to a small value. One thread per circle.
__global__ void kernelAdvanceHypnosis() {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= cuConstRendererParams.numCircles)
        return;
    float* rad = cuConstRendererParams.radius;
    const float cutOff = 0.5f;
    // reset to 0.02 past the threshold, otherwise grow by 0.01 per step
    rad[idx] = (rad[idx] > cutOff) ? 0.02f : rad[idx] + 0.01f;
}
// kernelAdvanceBouncingBalls
//
// Update the positino of the balls
// kernelAdvanceBouncingBalls
//
// Advance one ball by one time step: gravity pulls it down, the floor
// (y == 0) bounces it back with damping, and once its motion falls below
// epsilon near the floor it is pinned at rest. Only the y components of
// position/velocity change. One thread per circle.
__global__ void kernelAdvanceBouncingBalls() {
    const float dt = 1.f / 60.f;
    const float kGravity = -2.8f; // sorry Newton
    const float kDragCoeff = -0.8f;   // negative: a bounce reverses and damps velocity
    const float epsilon = 0.001f;     // "at rest" threshold
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;
    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;
    int index3 = 3 * index;
    // reverse velocity if center position < 0
    float oldVelocity = velocity[index3+1];
    float oldPosition = position[index3+1];
    if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition: ball already at rest
        return;
    }
    if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
        velocity[index3+1] *= kDragCoeff;
    }
    // update velocity: v = u + at (only along y-axis)
    velocity[index3+1] += kGravity * dt;
    // update positions (only along y-axis)
    position[index3+1] += velocity[index3+1] * dt;
    // pin the ball at rest once both velocity and position have stopped
    // changing meaningfully near/below the floor
    if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
        && oldPosition < 0.0f
        && fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
        velocity[index3+1] = 0.f;
        position[index3+1] = 0.f;
    }
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
// kernelAdvanceSnowflake -- (CUDA device code)
//
// Move the snowflake animation forward one time step: integrate position
// from velocity, then update velocity from gravity, cell noise (flutter)
// and linear drag. A flake that leaves the screen on the left, right or
// bottom is respawned above the top edge at a pseudo-random x position.
// One thread per circle.
__global__ void kernelAdvanceSnowflake() {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;
    const float dt = 1.f / 60.f;
    const float kGravity = -1.8f; // sorry Newton
    const float kDragCoeff = 2.f;
    int index3 = 3 * index;
    float* positionPtr = &cuConstRendererParams.position[index3];
    float* velocityPtr = &cuConstRendererParams.velocity[index3];
    // loads from global memory (single float3 load each)
    float3 position = *((float3*)positionPtr);
    float3 velocity = *((float3*)velocityPtr);
    // hack to make farther circles move more slowly, giving the
    // illusion of parallax
    float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
    // add some noise to the motion to make the snow flutter
    float3 noiseInput;
    noiseInput.x = 10.f * position.x;
    noiseInput.y = 10.f * position.y;
    noiseInput.z = 255.f * position.z;
    float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
    noiseForce.x *= 7.5f;
    noiseForce.y *= 5.f;
    // drag opposes the current velocity
    float2 dragForce;
    dragForce.x = -1.f * kDragCoeff * velocity.x;
    dragForce.y = -1.f * kDragCoeff * velocity.y;
    // update positions
    position.x += velocity.x * dt;
    position.y += velocity.y * dt;
    // update velocities
    // BUG FIX: the x update previously used dragForce.y, leaving the
    // computed dragForce.x unused; horizontal drag must use the x component.
    velocity.x += forceScaling * (noiseForce.x + dragForce.x) * dt;
    velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
    float radius = cuConstRendererParams.radius[index];
    // if the snowflake has moved off the left, right or bottom of
    // the screen, place it back at the top and give it a
    // pseudorandom x position and velocity.
    if ( (position.y + radius < 0.f) ||
         (position.x + radius) < -0.f ||
         (position.x - radius) > 1.f)
    {
        noiseInput.x = 255.f * position.x;
        noiseInput.y = 255.f * position.y;
        noiseInput.z = 255.f * position.z;
        noiseForce = cudaVec2CellNoise(noiseInput, index);
        position.x = .5f + .5f * noiseForce.x;
        position.y = 1.35f + radius;   // just above the top edge
        // restart from 0 vertical velocity. Choose a
        // pseudo-random horizontal velocity.
        velocity.x = 2.f * noiseForce.y;
        velocity.y = 0.f;
    }
    // store updated positions and velocities to global memory
    *((float3*)positionPtr) = position;
    *((float3*)velocityPtr) = velocity;
}
// shadePixelSmallCircles -- (CUDA device code)
//
// Blend circle circleIndex (center p) into the pixel at pixelCenter,
// reading and writing the RGBA pixel through imagePtr. Unlike shadePixel,
// this variant does a global read-modify-write per call, so it is only
// safe when no two concurrent threads target the same pixel (the
// small-circle path launches one thread per pixel per circle).
__device__ __inline__ void
shadePixelSmallCircles(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
    float diffX = p.x - pixelCenter.x;
    float diffY = p.y - pixelCenter.y;
    float pixelDist = diffX * diffX + diffY * diffY;   // squared distance
    float rad = cuConstRendererParams.radius[circleIndex];;
    float maxDist = rad * rad;
    // circle does not contribute to the image
    if (pixelDist > maxDist)
        return;
    float3 rgb;
    float alpha;
    // there is a non-zero contribution. Now compute the shading value
    // This conditional is in the inner loop, but it evaluates the
    // same direction for all threads so it's cost is not so
    // bad. Attempting to hoist this conditional is not a required
    // student optimization in Assignment 2
    if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
        // snowflakes: color ramp by normalized distance, alpha falls off
        // with distance and with depth (p.z)
        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;
        float normPixelDist = sqrt(pixelDist) / rad;
        rgb = lookupColor(normPixelDist);
        float maxAlpha = .6f + .4f * (1.f-p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
        alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
    } else {
        // simple: each circle has an assigned color
        int index3 = 3 * circleIndex;
        rgb = *(float3*)&(cuConstRendererParams.color[index3]);
        alpha = .5f;
    }
    float oneMinusAlpha = 1.f - alpha;
    // BEGIN SHOULD-BE-ATOMIC REGION
    // global memory read
    float4 existingColor = *imagePtr;
    float4 newColor;
    // standard "over" compositing against the existing pixel
    newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
    newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
    newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
    newColor.w = alpha + existingColor.w;
    // global memory write
    *imagePtr = newColor;
    // END SHOULD-BE-ATOMIC REGION
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
// shadePixel -- (CUDA device code)
//
// Given a pixel and a circle, blend the circle's contribution into the
// pixel. The pixel's RGBA channels are passed by reference and updated in
// registers -- the caller (kernelRenderCircles) accumulates over many
// circles and writes the result to global memory once, which avoids the
// read-modify-write race the float4* variant would have.
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float& redPix, float& greenPix, float& bluePix, float& alphaPix) {
    //shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
    float diffX = p.x - pixelCenter.x;
    float diffY = p.y - pixelCenter.y;
    float pixelDist = diffX * diffX + diffY * diffY;   // squared distance
    float rad = cuConstRendererParams.radius[circleIndex];;
    float maxDist = rad * rad;
    // circle does not contribute to the image
    if (pixelDist > maxDist)
        return;
    float3 rgb;
    float alpha;
    // there is a non-zero contribution. Now compute the shading value
    // This conditional is in the inner loop, but it evaluates the
    // same direction for all threads so it's cost is not so
    // bad. Attempting to hoist this conditional is not a required
    // student optimization in Assignment 2
    if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
        // snowflakes: color ramp by normalized distance, alpha falls off
        // with distance and with depth (p.z)
        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;
        float normPixelDist = sqrt(pixelDist) / rad;
        rgb = lookupColor(normPixelDist);
        float maxAlpha = .6f + .4f * (1.f-p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
        alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
    } else {
        // simple: each circle has an assigned color
        int index3 = 3 * circleIndex;
        rgb = *(float3*)&(cuConstRendererParams.color[index3]);
        alpha = .5f;
    }
    float oneMinusAlpha = 1.f - alpha;
    // BEGIN SHOULD-BE-ATOMIC REGION
    // global memory read
    //TODO: why in 2 steps -- is it to avoid some hazard???!!
    /*
    float4 existingColor = *imagePtr;
    float4 newColor;
    newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
    newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
    newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
    newColor.w = alpha + existingColor.w;
    // global memory write
    *imagePtr = newColor;
    */
    // "over" compositing, accumulated in the caller's registers
    redPix = alpha * rgb.x + oneMinusAlpha * redPix;
    greenPix = alpha * rgb.y + oneMinusAlpha * greenPix;
    bluePix = alpha * rgb.z + oneMinusAlpha * bluePix;
    alphaPix = alpha + alphaPix;
    // END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
// kernelRenderCircles -- (CUDA device code)
//
// One thread block per image tile, one thread per pixel of the tile.
// circleImgBlockList holds, per tile, the (1-based) ids of the circles
// that may overlap it; circleStartAddr[b] / circleStartAddr[b+1] bound
// tile b's sublist. The leading sharedSize entries of the sublist are
// staged in shared memory; any spill beyond that is read from global
// memory. Each thread accumulates its pixel in registers and writes it
// back once, so circles composite in list order without data races.
__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
    const int sharedSize = 2850;   // ints of shared staging space per block
    const int totalThreads = blockDim.x * blockDim.y;
    __shared__ int sharedData[sharedSize];
    float invWidth = cuConstRendererParams.invWidth;
    float invHeight = cuConstRendererParams.invHeight;
    int imageWidth = cuConstRendererParams.imageWidth;
    int imageHeight = cuConstRendererParams.imageHeight;
    // bounds of this tile's circle sublist
    int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
    int end_addr= circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
    int sharedCirclePairs = end_addr - start_addr;
    int data_per_thread;
    int sharedDataOverhead = 0;
    if(sharedCirclePairs<sharedSize)
        data_per_thread = (end_addr-start_addr + totalThreads-1)/totalThreads;
    else{
        data_per_thread = (sharedSize+totalThreads-1)/totalThreads;
        sharedDataOverhead = 1;   // sublist spills past the shared staging area
    }
    // cooperative copy of the (possibly truncated) sublist into shared memory.
    // BUG FIX: the linear thread id of a 2D block is
    // threadIdx.y * blockDim.x + threadIdx.x; the original multiplied by
    // blockDim.y, which is only correct for square blocks.
    for(int i=0; i < data_per_thread; i++ ){
        int tid = threadIdx.y * blockDim.x + threadIdx.x;
        if(tid < sharedCirclePairs && (i + data_per_thread * tid) < sharedSize){
            sharedData[i + data_per_thread * tid] = circleImgBlockList[start_addr + i + data_per_thread * tid];
        }
    }
    __syncthreads();   // staging must be complete before any thread reads it
    if(sharedCirclePairs){
        // pixel owned by this thread (tiles evenly partition the image)
        int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x ;
        int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y ;
        // accumulate the pixel in registers; one global write at the end
        float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
        float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
        float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
        float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];
        float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                             invHeight * (static_cast<float>(y) + 0.5f));
        int index ;
        // walk the sublist in order; stored ids are +1 (0 marks "no circle")
        for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
            if(sharedDataOverhead && ((arrIdx - start_addr) >= sharedSize))
                index = circleImgBlockList[arrIdx] - 1;   // spilled tail: global read
            else
                index = sharedData[arrIdx-start_addr] - 1;
            int index3 = 3 * index;
            float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
            shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
        }
        cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
        cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
        cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
        cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Construct an empty renderer: no scene loaded, no image allocated, and
// no device buffers yet (those are created in loadScene()/setup()).
CudaRenderer::CudaRenderer() {
    numCircles = 0;
    image = NULL;
    // host-side scene arrays
    position = NULL;
    velocity = NULL;
    color = NULL;
    radius = NULL;
    // device-side mirrors
    cudaDevicePosition = NULL;
    cudaDeviceVelocity = NULL;
    cudaDeviceColor = NULL;
    cudaDeviceRadius = NULL;
    cudaDeviceImageData = NULL;
}
// Release host scene arrays and device buffers. Uses `position` and
// `cudaDevicePosition` as sentinels on the assumption that the four host
// (resp. five device) allocations are always made together -- TODO confirm
// against loadScene()/setup().
CudaRenderer::~CudaRenderer() {
    if (image) {
        delete image;
    }
    if (position) {
        delete [] position;
        delete [] velocity;
        delete [] color;
        delete [] radius;
    }
    if (cudaDevicePosition) {
        hipFree(cudaDeviceVelocity);
        hipFree(cudaDevicePosition);
        hipFree(cudaDeviceColor);
        hipFree(cudaDeviceRadius);
        hipFree(cudaDeviceImageData);
    }
}
// Copy the rendered frame from device memory into the host Image and
// return it. hipMemcpy is blocking, so all previously launched render
// work has completed by the time the caller sees the pixels.
const Image*
CudaRenderer::getImage() {
    // need to copy contents of the rendered image from device memory
    // before we expose the Image object to the caller
    printf("Copying image data from device\n");
    hipMemcpy(image->data,
              cudaDeviceImageData,
              sizeof(float) * 4 * image->width * image->height,
              hipMemcpyDeviceToHost);
    return image;
}
// Load the named scene into the host-side arrays. loadCircleScene
// presumably allocates position/velocity/color/radius and sets
// numCircles (its outputs are passed by reference) -- see sceneLoader.
// Device copies happen later, in setup().
void
CudaRenderer::loadScene(SceneName scene) {
    sceneName = scene;
    loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
// One-time GPU initialization: report available devices, copy the loaded
// scene (positions, velocities, colors, radii) into device buffers, and
// publish the renderer parameters plus noise/color lookup tables to
// constant memory. Must be called after loadScene() and allocOutputImage().
void
CudaRenderer::setup() {
    int deviceCount = 0;
    std::string name;
    hipError_t err = hipGetDeviceCount(&deviceCount);
    printf("---------------------------------------------------------\n");
    printf("Initializing CUDA for CudaRenderer\n");
    printf("Found %d CUDA devices\n", deviceCount);
    for (int i=0; i<deviceCount; i++) {
        hipDeviceProp_t deviceProps;
        hipGetDeviceProperties(&deviceProps, i);
        name = deviceProps.name;
        printf("Device %d: %s\n", i, deviceProps.name);
        printf("   SMs:        %d\n", deviceProps.multiProcessorCount);
        printf("   Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf("   CUDA Cap:   %d.%d\n", deviceProps.major, deviceProps.minor);
    }
    printf("---------------------------------------------------------\n");
    // By this time the scene should be loaded.  Now copy all the key
    // data structures into device memory so they are accessible to
    // CUDA kernels
    //
    // See the CUDA Programmer's Guide for descriptions of
    // hipMalloc and hipMemcpy
    hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
    hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
    hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
    hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
    hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
    hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
    hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
    hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
    hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
    // Initialize parameters in constant memory.  We didn't talk about
    // constant memory in class, but the use of read-only constant
    // memory here is an optimization over just sticking these values
    // in device global memory.  NVIDIA GPUs have a few special tricks
    // for optimizing access to constant memory.  Using global memory
    // here would have worked just as well.  See the Programmer's
    // Guide for more information about constant memory.
    GlobalConstants params;
    params.sceneName = sceneName;
    params.numCircles = numCircles;
    params.imageWidth = image->width;
    params.imageHeight = image->height;
    params.invWidth = 1.f / image->width;
    params.invHeight = 1.f / image->height;
    params.position = cudaDevicePosition;
    params.velocity = cudaDeviceVelocity;
    params.color = cudaDeviceColor;
    params.radius = cudaDeviceRadius;
    params.imageData = cudaDeviceImageData;
    // BUG FIX: the address-of operator had been mangled into the HTML
    // entity "&para;" ("¶ms"), which does not compile.
    hipMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants));
    // also need to copy over the noise lookup tables, so we can
    // implement noise on the GPU
    int* permX;
    int* permY;
    float* value1D;
    getNoiseTables(&permX, &permY, &value1D);
    hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
    hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
    hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
    // last, copy over the color table that's used by the shading
    // function for circles in the snowflake demo
    float lookupTable[COLOR_MAP_SIZE][3] = {
        {1.f, 1.f, 1.f},
        {1.f, 1.f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, 0.8f, 1.f},
    };
    hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
// allocOutputImage --
//
// Allocate the buffer the renderer will draw into, releasing any
// previously allocated image first so repeated calls don't leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
    delete image;   // deleting NULL is a no-op, so no guard is needed
    image = new Image(width, height);
}
// clearImage --
//
// Clear's the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
// clearImage --
//
// Clear the renderer's target image on the device. Snowflake scenes get
// the gradient background; everything else is cleared to opaque white.
// Launches a 16x16 thread block per image tile and blocks until done.
void
CudaRenderer::clearImage() {
    // 256 threads per block is a healthy number
    dim3 blockDim(16, 16, 1);
    // ceil-div so partial edge tiles are covered (kernels bounds-check)
    dim3 gridDim(
        (image->width + blockDim.x - 1) / blockDim.x,
        (image->height + blockDim.y - 1) / blockDim.y);
    if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
        hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else {
        hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
    }
    hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
// advanceAnimation --
//
// Advance the simulation one time step by dispatching the per-scene
// update kernel (one thread per circle). Scenes without animation fall
// through and launch nothing. Blocks until the kernel completes.
void
CudaRenderer::advanceAnimation() {
    // 256 threads per block is a healthy number
    dim3 blockDim(256, 1);
    // ceil-div so every circle gets a thread (kernels bounds-check)
    dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
    // only the snowflake scene has animation
    if (sceneName == SNOWFLAKES) {
        hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == BOUNCING_BALLS) {
        hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == HYPNOSIS) {
        hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == FIREWORKS) {
        hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
    }
    hipDeviceSynchronize();
}
// make_circleImgBlockArray -- (CUDA device code)
//
// One thread per circle: compute the circle's screen-space bounding box,
// then mark every imgBlockWidth x imgBlockWidth image tile the box
// touches. Both outputs are numImgBlocks x numCircles matrices (row =
// tile, column = circle): circleImgBlockArray gets a 1 flag,
// circleImgBlockId gets the 1-based circle id (0 = not present). The
// arrays are assumed pre-zeroed by the caller -- TODO confirm; no
// hipMemset is visible in render().
__global__ void make_circleImgBlockArray(int *circleImgBlockArray, int *circleImgBlockId, int imgBlockWidth, int imgBlockNum) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;
    int index3 = 3 * index;
    // read position and radius
    float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
    float rad = cuConstRendererParams.radius[index];
    // compute the bounding box of the circle. The bound is in integer
    // screen coordinates, so it's clamped to the edges of the screen.
    short imageWidth = cuConstRendererParams.imageWidth;
    short imageHeight = cuConstRendererParams.imageHeight;
    short minX = static_cast<short>(imageWidth * (p.x - rad));
    short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
    short minY = static_cast<short>(imageHeight * (p.y - rad));
    short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
    // a bunch of clamps.  Is there a CUDA built-in for this?
    short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
    short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
    short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
    short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
    // visit every tile the (clamped) bounding box overlaps
    for (short x = (screenMinX/imgBlockWidth); x <= (screenMaxX/imgBlockWidth); x++) {
        for (short y = (screenMinY/imgBlockWidth); y <= (screenMaxY/imgBlockWidth); y++) {
            // a box flush against the right/bottom edge rounds to tile
            // index imgBlockNum, which is out of range -- skip it
            if((x == imgBlockNum) || (y == imgBlockNum)) { continue;}
            circleImgBlockArray[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = 1;
            circleImgBlockId[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = index+1;
        }
    }
}
// print_kernel -- (CUDA device code, debugging only)
//
// Dump the first `length` entries of a device int array via device
// printf. Intended to be launched with a single thread.
__global__ void print_kernel(int length, int* input) {
    printf("HERE\n");
    int i = 0;
    while (i < length) {
        printf("input[%d] = %d\n", i, input[i]);
        ++i;
    }
}
// compare_array -- (CUDA device code, debugging only)
//
// Element-wise compare two device int arrays of the given length and
// print every mismatch (array1 is treated as the expected values).
__global__ void compare_array(int length, int* array1, int* array2) {
    for (int i = 0; i < length; ++i) {
        int expected = array1[i];
        int got = array2[i];
        if (expected != got)
            printf("Arrays don't match. Expected = %d, Got = %d\n", expected, got);
    }
}
// getRefCircleArray -- (CUDA device code, debugging only)
//
// Reference computation of the circle/tile overlap matrix using
// circleInBoxConservative: each block (blockIdx.x, blockIdx.y) owns one
// tile and loops over all circles serially. Output layout matches
// make_circleImgBlockArray: refCircleImgArray[tile * numCircles + circle].
// NOTE(review): the last two box arguments pass (blockIdx.y+1) before
// blockIdx.y -- verify this matches circleInBoxConservative's expected
// top/bottom order in circleBoxTest.cu_inl.
__global__ void getRefCircleArray(int* refCircleImgArray) {
    for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
        int index3 = 3 * index;
        float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
        float rad = cuConstRendererParams.radius[index];
        // BlockDim = 256 x1, gridDim = 4x4
        int circleInBox = circleInBoxConservative(p.x, p.y, rad,
            static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
            static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
        refCircleImgArray[index + (blockIdx.x + blockIdx.y*gridDim.x)*cuConstRendererParams.numCircles] = circleInBox;
    }
}
// Predicate functor for thrust::copy_if: keeps nonzero elements. Used to
// compact the sparse circle-id matrix into a dense per-tile list (0 marks
// "circle not in tile").
template <typename T>
struct is_not_zero : public thrust::unary_function<T,bool>
{
    __host__ __device__
    bool operator()(T x)
    {
        return (x != 0);
    }
};
// Functor mapping a linear matrix index to its row index (i / C for C
// columns). Feeding this through a transform_iterator gives
// thrust::reduce_by_key equal keys for all elements of a row, yielding
// per-row sums.
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
    T C; // number of columns
    __host__ __device__
    linear_index_to_row_index(T C) : C(C) {}
    __host__ __device__
    T operator()(T i)
    {
        return i / C;
    }
};
// kernelRenderSmallCircles -- (CUDA device code)
//
// Render a single circle: one thread per pixel of the circle's screen
// bounding box [screenMinX, screenMaxX) x [screenMinY, screenMaxY).
// Because only one circle is processed per launch, the in-place pixel
// update inside shadePixelSmallCircles is race-free. Used by render()
// for scenes with very few circles.
__global__ void kernelRenderSmallCircles(int index, int imageWidth, int imageHeight, int screenMinX, int screenMinY, int screenMaxX, int screenMaxY) {
    float invWidth = 1.f / imageWidth;
    float invHeight = 1.f / imageHeight;
    int index3 = 3 * index;
    float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
    // map thread -> pixel, offset into the bounding box
    int x = blockIdx.x*blockDim.x + threadIdx.x + screenMinX;
    int y = blockIdx.y*blockDim.y + threadIdx.y + screenMinY;
    // guard the ragged right/bottom edges of the box
    if(x >= screenMaxX) return;
    if(y >= screenMaxY) return;
    /*
    const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;
    if(offset >= (screenMaxX - screenMinX) * (screenMaxY - screenMinY)) return;
    int x = (offset % (screenMaxX - screenMinX)) + screenMinX;
    int y = (offset / (screenMaxX - screenMinX)) + screenMinY;
    */
    float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
    float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                         invHeight * (static_cast<float>(y) + 0.5f));
    shadePixelSmallCircles(index, pixelCenterNorm, p, imgPtr);
}
// render --
//
// Render the current frame. Two paths:
//  * < 5 circles: launch kernelRenderSmallCircles once per circle over
//    that circle's screen bounding box (race-free, simple).
//  * otherwise: bin circles into a 32x32 grid of image tiles on the GPU,
//    compact the bins with thrust (reduce_by_key + exclusive_scan +
//    copy_if), then launch kernelRenderCircles with one block per tile.
void
CudaRenderer::render() {
    // compute the bounding box of the circle. The bound is in integer
    // screen coordinates, so it's clamped to the edges of the screen.
    short imageWidth = image->width;
    int* circleImgBlockArray = NULL;
    int* circleImgBlockId = NULL;
    if (numCircles < 5) {
        for (int i = 0; i < numCircles; i++) {
            // read position and radius of circle i (host-side arrays)
            int index3 = 3 * i;
            float3 p = *(float3*)(&position[index3]);
            float rad = radius[i];
            // compute the bounding box of the circle, clamped to the screen
            short imageWidth = image->width;
            short imageHeight = image->height;
            short minX = static_cast<short>(imageWidth * (p.x - rad));
            short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
            short minY = static_cast<short>(imageHeight * (p.y - rad));
            short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
            short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
            short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
            short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
            short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
            // one thread per bounding-box pixel
            dim3 blockDim(16, 16);
            dim3 gridDim(((screenMaxX - screenMinX) + blockDim.x - 1) / blockDim.x, ((screenMaxY - screenMinY) + blockDim.y - 1) / blockDim.y);
            hipLaunchKernelGGL(( kernelRenderSmallCircles), dim3(gridDim), dim3(blockDim), 0, 0, i, imageWidth, imageHeight, screenMinX, screenMinY, screenMaxX, screenMaxY);
            gpuErrchk(hipDeviceSynchronize());
        }
    } else {
        int imgBlockNum = 32;
        int numImgBlocks = imgBlockNum * imgBlockNum;
        int numElements = numCircles * imgBlockNum * imgBlockNum;
        hipMalloc(&circleImgBlockArray, sizeof(int) * numElements);
        hipMalloc(&circleImgBlockId, sizeof(int) * numElements);
        // bin circles into tiles: one thread per circle
        dim3 blockDim(512, 1);
        dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
        hipLaunchKernelGGL(( make_circleImgBlockArray), dim3(gridDim), dim3(blockDim), 0, 0, circleImgBlockArray,circleImgBlockId,imageWidth/imgBlockNum, imgBlockNum);
        /* Compact the sparse tile x circle matrix into a dense per-tile list */
        thrust::device_ptr<int> thrust_arr = thrust::device_pointer_cast(circleImgBlockArray);
        thrust::device_ptr<int> thrust_circleid = thrust::device_pointer_cast(circleImgBlockId);
        // per-tile circle counts (row sums of the flag matrix)
        thrust::device_vector<int> row_sums(numImgBlocks+1);
        thrust::device_vector<int> row_indices(numImgBlocks);
        thrust::reduce_by_key
            (thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)),
             thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)) + numElements,
             thrust_arr,
             row_indices.begin(),
             row_sums.begin(),
             thrust::equal_to<int>(),
             thrust::plus<int>());
        // zero sentinel so the exclusive scan's last entry is the grand total
        thrust::fill(thrust::device, row_sums.end() - 1, row_sums.end(), 0);
        thrust::device_vector<int> circleStartAddr(numImgBlocks+1);
        thrust::exclusive_scan(row_sums.begin(), row_sums.end(), circleStartAddr.begin());
        int num_pairs = circleStartAddr[numImgBlocks];
        // dense (tile-ordered) list of circle ids; each block walks its
        // slice using circleStartAddr
        thrust::device_vector<int> circleImgBlockList(num_pairs);
        thrust::copy_if(thrust_circleid, thrust_circleid + numElements, circleImgBlockList.begin(), is_not_zero<int>());
        dim3 gridDim3(imgBlockNum, imgBlockNum);
        dim3 blockDim3(1024/imgBlockNum,1024/imgBlockNum);
        int *deviceStartAddr = NULL;
        deviceStartAddr = thrust::raw_pointer_cast(circleStartAddr.data());
        int *deviceImgBlockList = NULL;
        deviceImgBlockList = thrust::raw_pointer_cast(circleImgBlockList.data());
        hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim3), dim3(blockDim3), 0, 0, deviceImgBlockList, deviceStartAddr);
        gpuErrchk(hipDeviceSynchronize());
        // BUG FIX: these buffers were allocated every frame but never
        // freed (the frees were commented out), leaking
        // 2 * numCircles * 1024 ints of device memory per frame.
        hipFree(circleImgBlockArray);
        hipFree(circleImgBlockId);
    }
}
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
//TODO: not sure if this block is needed
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/random.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
// gpuErrchk / gpuAssert: wrap CUDA runtime calls so failures are reported
// with file/line context instead of being silently dropped.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        // print the human-readable error string, then (by default) exit
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
// All parameters the kernels need, packed into one struct so the whole
// set can be pushed to constant memory with a single cudaMemcpyToSymbol.
// Pointer members are device addresses.
struct GlobalConstants {
    SceneName sceneName;   // which demo scene is being rendered
    int numCircles;        // total circle count
    float* position;       // 3 floats (x,y,z) per circle
    float* velocity;       // 3 floats per circle
    float* color;          // 3 floats (r,g,b) per circle
    float* radius;         // 1 float per circle
    int imageWidth;
    int imageHeight;
    float invWidth;        // 1/imageWidth, precomputed for pixel->[0,1] mapping
    float invHeight;       // 1/imageHeight
    float* imageData;      // frame buffer, 4 floats (RGBA) per pixel
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Fills the framebuffer with the vertical white-to-gray gradient used as
// the background of the snowflake scene. One thread per pixel.
__global__ void kernelClearImageSnowflake() {
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;

    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;

    if (px < w && py < h) {
        // shade ramps from .85 at the top row toward .4 at the bottom
        float shade = .4f + .45f * static_cast<float>(h - py) / h;
        // single float4 store is cheaper than four scalar fp32 stores
        float4* dst = (float4*)(&cuConstRendererParams.imageData[4 * (py * w + px)]);
        *dst = make_float4(shade, shade, shade, 1.f);
    }
}
// kernelClearImage -- (CUDA device code)
//
// Sets every pixel of the framebuffer to the given RGBA color.
// One thread per pixel.
__global__ void kernelClearImage(float r, float g, float b, float a) {
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;

    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;

    if (px >= w || py >= h)
        return;

    // single float4 store is cheaper than four scalar fp32 stores
    float4* dst = (float4*)(&cuConstRendererParams.imageData[4 * (py * w + px)]);
    *dst = make_float4(r, g, b, a);
}
// kernelAdvanceFireWorks -- (CUDA device code)
//
// Advances the firework animation one time step, one thread per circle.
// The first NUM_FIREWORKS circles are the (static) firework centers; the
// rest are sparks, NUM_SPARKS per firework. Sparks drift outward and are
// respawned on their firework's rim once they travel beyond maxDist from
// the center.
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
// index3i: x-component offset of this spark's parent firework center
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
// index3j: x-component offset of this spark (sIdx equals index here)
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// deterministic per-spark position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length, radially outward
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis -- (CUDA device code)
//
// Grows each circle's radius by 0.01 per frame, snapping it back to 0.02
// once it exceeds 0.5. One thread per circle.
__global__ void kernelAdvanceHypnosis() {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    float* radius = cuConstRendererParams.radius;
    const float cutOff = 0.5f;
    // shrink back to the small radius after crossing the threshold
    radius[index] = (radius[index] > cutOff) ? 0.02f : radius[index] + 0.01f;
}
// kernelAdvanceBouncingBalls -- (CUDA device code)
//
// Advances the bouncing-ball animation one time step, one thread per
// circle. Only the y components of position/velocity change: gravity
// pulls the ball down, crossing the floor reverses and damps the
// velocity, and the ball is parked at rest once its motion drops below
// epsilon.
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f; // bounce factor: sign flip plus energy loss
const float epsilon = 0.001f; // "at rest" threshold
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition: ball already parked
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag opposes the current velocity
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
// NOTE(review): the x update reads dragForce.y, not dragForce.x. This
// looks like a typo, but "fixing" it would change the reference
// rendering -- confirm against expected output before touching it.
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixelSmallCircles -- (CUDA device code)
//
// Blends one circle's contribution into the framebuffer pixel pointed to
// by imagePtr. The read-modify-write on *imagePtr is not atomic; callers
// must ensure no two threads touch the same pixel concurrently.
__device__ __inline__ void
shadePixelSmallCircles(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
    float dx = p.x - pixelCenter.x;
    float dy = p.y - pixelCenter.y;
    float distSq = dx * dx + dy * dy;

    float rad = cuConstRendererParams.radius[circleIndex];
    if (distSq > rad * rad)
        return; // pixel center lies outside this circle

    float3 rgb;
    float alpha;

    // Snowflake scenes shade with a radial color ramp and exponential
    // alpha falloff; other scenes use the circle's flat color at 50%
    // alpha. The branch resolves the same way for all threads, so the
    // divergence cost is small.
    if (cuConstRendererParams.sceneName == SNOWFLAKES ||
        cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;

        float normPixelDist = sqrt(distSq) / rad;
        rgb = lookupColor(normPixelDist);

        // farther snowflakes (larger z) are more transparent
        float maxAlpha = .6f + .4f * (1.f - p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f);
        alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
    } else {
        rgb = *(float3*)&(cuConstRendererParams.color[3 * circleIndex]);
        alpha = .5f;
    }

    // "over" compositing: read, blend, write back
    float oneMinusAlpha = 1.f - alpha;
    float4 dst = *imagePtr;
    dst.x = alpha * rgb.x + oneMinusAlpha * dst.x;
    dst.y = alpha * rgb.y + oneMinusAlpha * dst.y;
    dst.z = alpha * rgb.z + oneMinusAlpha * dst.z;
    dst.w = alpha + dst.w;
    *imagePtr = dst;
}
// shadePixel -- (CUDA device code)
//
// Blends one circle's contribution into a pixel whose RGBA components
// live in registers (passed by reference). Keeping the accumulators in
// registers lets kernelRenderCircles do a single global read and write
// per pixel instead of one per circle; the caller writes the final
// values back to the framebuffer.
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float& redPix, float& greenPix, float& bluePix, float& alphaPix) {
    float dx = p.x - pixelCenter.x;
    float dy = p.y - pixelCenter.y;
    float distSq = dx * dx + dy * dy;

    float rad = cuConstRendererParams.radius[circleIndex];
    if (distSq > rad * rad)
        return; // pixel center lies outside this circle

    float3 rgb;
    float alpha;

    // Snowflake scenes shade with a radial color ramp and exponential
    // alpha falloff; other scenes use the circle's flat color at 50%
    // alpha. The branch resolves the same way for all threads, so the
    // divergence cost is small.
    if (cuConstRendererParams.sceneName == SNOWFLAKES ||
        cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;

        float normPixelDist = sqrt(distSq) / rad;
        rgb = lookupColor(normPixelDist);

        // farther snowflakes (larger z) are more transparent
        float maxAlpha = .6f + .4f * (1.f - p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f);
        alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
    } else {
        rgb = *(float3*)&(cuConstRendererParams.color[3 * circleIndex]);
        alpha = .5f;
    }

    // "over" compositing into the register-resident pixel
    float oneMinusAlpha = 1.f - alpha;
    redPix = alpha * rgb.x + oneMinusAlpha * redPix;
    greenPix = alpha * rgb.y + oneMinusAlpha * greenPix;
    bluePix = alpha * rgb.z + oneMinusAlpha * bluePix;
    alphaPix = alpha + alphaPix;
}
// kernelRenderCircles -- (CUDA device code)
//
// One thread block per image tile; one thread per pixel of the tile.
// circleStartAddr gives, per tile, the [start, end) range into
// circleImgBlockList of 1-based circle ids overlapping the tile; each
// pixel blends those circles in list order (list order preserves circle
// order, which the renderer's semantics require). The tile's circle list
// is staged in shared memory (up to sharedSize entries); any overflow is
// read directly from global memory.
//
// Fixes over the previous version:
//  * the flattened thread id used blockDim.y instead of blockDim.x
//    (only accidentally correct while the block is square);
//  * the staging loop could read past this tile's slice of
//    circleImgBlockList (an out-of-bounds global read for the last
//    tile); staging is now bounded by sharedCirclePairs as well.
__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
    const int sharedSize = 2850;
    const int totalThreads = blockDim.x * blockDim.y;
    __shared__ int sharedData[sharedSize];

    float invWidth = cuConstRendererParams.invWidth;
    float invHeight = cuConstRendererParams.invHeight;
    int imageWidth = cuConstRendererParams.imageWidth;
    int imageHeight = cuConstRendererParams.imageHeight;

    int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
    int end_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
    int sharedCirclePairs = end_addr - start_addr;

    // Each thread stages data_per_thread consecutive list entries.
    int data_per_thread;
    int sharedDataOverhead = 0; // set when the list does not fit in shared memory
    if (sharedCirclePairs < sharedSize) {
        data_per_thread = (sharedCirclePairs + totalThreads-1)/totalThreads;
    } else {
        data_per_thread = (sharedSize+totalThreads-1)/totalThreads;
        sharedDataOverhead = 1;
    }

    int tid = threadIdx.y * blockDim.x + threadIdx.x; // flattened thread id
    for (int i = 0; i < data_per_thread; i++) {
        int smemIdx = i + data_per_thread * tid;
        // bound by both the tile's pair count and the shared buffer size
        if (smemIdx < sharedCirclePairs && smemIdx < sharedSize) {
            sharedData[smemIdx] = circleImgBlockList[start_addr + smemIdx];
        }
    }
    __syncthreads(); // staging must complete before any thread consumes it

    if (sharedCirclePairs) {
        // pixel owned by this thread
        int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x;
        int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y;

        // accumulate in registers: one global read + one write per pixel
        float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
        float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
        float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
        float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];

        float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                             invHeight * (static_cast<float>(y) + 0.5f));

        for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
            int index; // stored ids are 1-based (0 marks "empty" upstream)
            if (sharedDataOverhead && ((arrIdx - start_addr) >= sharedSize))
                index = circleImgBlockList[arrIdx] - 1; // overflow: read from global
            else
                index = sharedData[arrIdx-start_addr] - 1;
            float3 p = *(float3*)(&cuConstRendererParams.position[3 * index]);
            shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
        }

        cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
        cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
        cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
        cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Construct an empty renderer; all host and device resources are
// acquired later via loadScene()/allocOutputImage()/setup().
CudaRenderer::CudaRenderer()
    : image(NULL),
      numCircles(0),
      position(NULL),
      velocity(NULL),
      color(NULL),
      radius(NULL),
      cudaDevicePosition(NULL),
      cudaDeviceVelocity(NULL),
      cudaDeviceColor(NULL),
      cudaDeviceRadius(NULL),
      cudaDeviceImageData(NULL) {
}
// Release the output image, the host-side scene arrays, and the device
// allocations made in setup(). A non-NULL `position` / `cudaDevicePosition`
// is treated as the flag that the whole corresponding group was allocated.
CudaRenderer::~CudaRenderer() {
    delete image; // deleting NULL is a no-op

    if (position != NULL) {
        delete [] position;
        delete [] velocity;
        delete [] color;
        delete [] radius;
    }

    if (cudaDevicePosition != NULL) {
        cudaFree(cudaDevicePosition);
        cudaFree(cudaDeviceVelocity);
        cudaFree(cudaDeviceColor);
        cudaFree(cudaDeviceRadius);
        cudaFree(cudaDeviceImageData);
    }
}
// getImage --
//
// Copies the rendered frame from device memory into the host-side Image
// before exposing it to the caller. The copy is now error-checked via
// gpuErrchk, matching the rest of the file (it was previously unchecked,
// so a failed copy would silently return stale image data).
const Image*
CudaRenderer::getImage() {
    printf("Copying image data from device\n");

    gpuErrchk(cudaMemcpy(image->data,
                         cudaDeviceImageData,
                         sizeof(float) * 4 * image->width * image->height,
                         cudaMemcpyDeviceToHost));

    return image;
}
// loadScene --
//
// Records the scene name and fills the host-side circle arrays
// (position, velocity, color, radius) plus numCircles via
// loadCircleScene. The device-side copies are made later, in setup().
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
// setup --
//
// One-time CUDA initialization: prints the available devices, copies the
// host-side scene arrays into device global memory, and fills the
// constant-memory tables (renderer params, noise permutation tables, and
// the snowflake color ramp) the kernels read. Must run after loadScene()
// and allocOutputImage(). All CUDA API calls are now checked with
// gpuErrchk (the previous version captured cudaGetDeviceCount's status
// into an unused variable and ignored every other return code).
void
CudaRenderer::setup() {
    int deviceCount = 0;
    std::string name;
    gpuErrchk(cudaGetDeviceCount(&deviceCount));

    printf("---------------------------------------------------------\n");
    printf("Initializing CUDA for CudaRenderer\n");
    printf("Found %d CUDA devices\n", deviceCount);

    for (int i=0; i<deviceCount; i++) {
        cudaDeviceProp deviceProps;
        gpuErrchk(cudaGetDeviceProperties(&deviceProps, i));
        name = deviceProps.name;
        printf("Device %d: %s\n", i, deviceProps.name);
        printf(" SMs: %d\n", deviceProps.multiProcessorCount);
        printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
    }
    printf("---------------------------------------------------------\n");

    // By this time the scene should be loaded. Copy all the key data
    // structures into device memory so they are accessible to kernels.
    gpuErrchk(cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles));
    gpuErrchk(cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles));
    gpuErrchk(cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles));
    gpuErrchk(cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles));
    gpuErrchk(cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height));

    gpuErrchk(cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice));

    // Publish the renderer parameters in constant memory: read-only data
    // the GPU caches aggressively, so this is a cheap broadcast to all
    // kernels.
    GlobalConstants params;
    params.sceneName = sceneName;
    params.numCircles = numCircles;
    params.imageWidth = image->width;
    params.imageHeight = image->height;
    params.invWidth = 1.f / image->width;
    params.invHeight = 1.f / image->height;
    params.position = cudaDevicePosition;
    params.velocity = cudaDeviceVelocity;
    params.color = cudaDeviceColor;
    params.radius = cudaDeviceRadius;
    params.imageData = cudaDeviceImageData;

    gpuErrchk(cudaMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants)));

    // Noise lookup tables used by the snowflake animation kernels.
    int* permX;
    int* permY;
    float* value1D;
    getNoiseTables(&permX, &permY, &value1D);
    gpuErrchk(cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256));
    gpuErrchk(cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256));
    gpuErrchk(cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256));

    // Color ramp used by the snowflake shading function.
    float lookupTable[COLOR_MAP_SIZE][3] = {
        {1.f, 1.f, 1.f},
        {1.f, 1.f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, 0.8f, 1.f},
    };
    gpuErrchk(cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE));
}
// allocOutputImage --
//
// (Re)allocates the render target, first releasing any image from a
// previous call so repeated calls do not leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
    delete image; // no-op when image is NULL
    image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. Snowflake scenes get the gradient
// background; everything else is cleared to opaque white.
void
CudaRenderer::clearImage() {
    // 256 threads per block is a healthy number
    dim3 threadsPerBlock(16, 16, 1);
    dim3 numBlocks(
        (image->width + threadsPerBlock.x - 1) / threadsPerBlock.x,
        (image->height + threadsPerBlock.y - 1) / threadsPerBlock.y);

    bool snowScene = (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME);
    if (snowScene)
        kernelClearImageSnowflake<<<numBlocks, threadsPerBlock>>>();
    else
        kernelClearImage<<<numBlocks, threadsPerBlock>>>(1.f, 1.f, 1.f, 1.f);

    cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advances the simulation one time step, updating circle positions and
// velocities. Only some scenes animate; the rest are left untouched.
void
CudaRenderer::advanceAnimation() {
    // 256 threads per block is a healthy number
    dim3 blockDim(256, 1);
    dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);

    switch (sceneName) {
        case SNOWFLAKES:
            kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
            break;
        case BOUNCING_BALLS:
            kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
            break;
        case HYPNOSIS:
            kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
            break;
        case FIREWORKS:
            kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
            break;
        default:
            break; // static scenes: nothing to advance
    }
    cudaDeviceSynchronize();
}
// make_circleImgBlockArray -- (CUDA device code)
//
// One thread per circle. Computes the circle's screen-space bounding box
// and, for every imgBlockWidth x imgBlockWidth image tile the box
// touches, marks the (tile, circle) pair in two numCircles-wide
// row-major matrices:
//   circleImgBlockArray[tile*numCircles + circle] = 1
//   circleImgBlockId   [tile*numCircles + circle] = circle + 1
// Ids are stored 1-based so that 0 can mean "no circle" when the id
// matrix is later compacted with copy_if. imgBlockNum is the number of
// tiles per image dimension.
__global__ void make_circleImgBlockArray(int *circleImgBlockArray, int *circleImgBlockId, int imgBlockWidth, int imgBlockNum) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
//printf("Index : %d\n", index);
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// a bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
/*
printf("MinX = %d\n",screenMinX/imgBlockWidth);
printf("MaxX = %d\n",screenMaxX/imgBlockWidth);
printf("MinY = %d\n",screenMinY/imgBlockWidth);
printf("MaxY = %d\n",screenMaxY/imgBlockWidth);
*/
// mark every tile the bounding box overlaps; the out-of-range tile
// index imgBlockNum (box touching the far screen edge) is skipped
for (short x = (screenMinX/imgBlockWidth); x <= (screenMaxX/imgBlockWidth); x++) {
for (short y = (screenMinY/imgBlockWidth); y <= (screenMaxY/imgBlockWidth); y++) {
if((x == imgBlockNum) || (y == imgBlockNum)) { continue;}
circleImgBlockArray[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = 1;
circleImgBlockId[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = index+1;
//printf("Index = %d %d %d\n", x, y, index);
//printf("HERE!!!!\n");
}
}
}
// print_kernel -- (CUDA device code)
//
// Debug helper: dumps an integer device array from a single-thread launch.
__global__ void print_kernel(int length, int* input) {
    printf("HERE\n");
    int i = 0;
    while (i < length) {
        printf("input[%d] = %d\n", i, input[i]);
        ++i;
    }
}
// compare_array -- (CUDA device code)
//
// Debug helper: element-wise comparison of two device arrays, printing a
// message for every mismatch. Intended for a single-thread launch.
__global__ void compare_array(int length, int* array1, int* array2) {
    for (int i = 0; i < length; ++i) {
        if (array1[i] == array2[i])
            continue;
        printf("Arrays don't match. Expected = %d, Got = %d\n", array1[i], array2[i]);
    }
}
// getRefCircleArray -- (CUDA device code)
//
// Reference (brute-force) circle/tile intersection used to validate the
// fast make_circleImgBlockArray path (see compare_array). Each block
// loops over all circles and records, for its tile of the
// gridDim.x x gridDim.y tiling of normalized [0,1] screen space, whether
// the circle conservatively intersects the tile.
// NOTE(review): the last two arguments pass (blockIdx.y+1) before
// (blockIdx.y) -- verify this matches circleInBoxConservative's expected
// top/bottom parameter order (defined in circleBoxTest.cu_inl).
__global__ void getRefCircleArray(int* refCircleImgArray) {
for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
int circleInBox = circleInBoxConservative(p.x, p.y, rad,
static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
//printf("ID: %d\n" , index + (blockIdx.x + blockIdx.y*gridDim.x)*cuConstRendererParams.numCircles);
refCircleImgArray[index + (blockIdx.x + blockIdx.y*gridDim.x)*cuConstRendererParams.numCircles] = circleInBox;
}
}
// Predicate functor: true for any element different from zero. Used with
// thrust::copy_if to compact the sparse 1-based circle-id matrix.
template <typename T>
struct is_not_zero : public thrust::unary_function<T,bool>
{
    __host__ __device__
    bool operator()(T x)
    {
        return !(x == 0);
    }
};
// convert a linear index into a row-major matrix to its row index
// Functor mapping a flat element index of a row-major matrix with C
// columns to the index of the row containing it. Used as a key iterator
// for thrust::reduce_by_key to sum each row.
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
    T C; // number of columns per row

    __host__ __device__
    linear_index_to_row_index(T cols) : C(cols) {}

    __host__ __device__
    T operator()(T i)
    {
        return i / C;
    }
};
// kernelRenderSmallCircles -- (CUDA device code)
//
// Shades one circle over its clamped screen bounding box
// [screenMinX, screenMaxX) x [screenMinY, screenMaxY); one thread per
// pixel. Used by the low-circle-count render path, where circles are
// drawn one launch at a time so pixel updates never race.
__global__ void kernelRenderSmallCircles(int index, int imageWidth, int imageHeight, int screenMinX, int screenMinY, int screenMaxX, int screenMaxY) {
    int x = blockIdx.x * blockDim.x + threadIdx.x + screenMinX;
    int y = blockIdx.y * blockDim.y + threadIdx.y + screenMinY;
    if (x >= screenMaxX || y >= screenMaxY)
        return;

    float invWidth = 1.f / imageWidth;
    float invHeight = 1.f / imageHeight;

    float3 p = *(float3*)(&cuConstRendererParams.position[3 * index]);
    float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                         invHeight * (static_cast<float>(y) + 0.5f));
    float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);

    shadePixelSmallCircles(index, pixelCenterNorm, p, imgPtr);
}
// render --
//
// Renders the current scene. Scenes with fewer than 5 circles draw each
// circle with its own launch over the circle's bounding box. Larger
// scenes build a per-tile circle list on the device with thrust, then
// launch one thread block per tile of a 32x32 tiling to shade its pixels.
//
// Fixes over the previous version: the two temporary device matrices
// (circleImgBlockArray / circleImgBlockId) were allocated every frame but
// never freed (the cudaFree calls were commented out), leaking
// 2 * numCircles * 1024 ints per frame; they are now released as soon as
// thrust is done with them, and the allocations are error-checked.
void
CudaRenderer::render() {
    short imageWidth = image->width;
    int* circleImgBlockArray = NULL;
    int* circleImgBlockId = NULL;

    if (numCircles < 5) {
        // Few circles: one launch per circle over its bounding box.
        for (int i = 0; i < numCircles; i++) {
            // read position and radius
            int index3 = 3 * i;
            float3 p = *(float3*)(&position[index3]);
            float rad = radius[i];

            // compute the circle's bounding box in integer screen
            // coordinates, clamped to the edges of the screen
            short imageWidth = image->width;
            short imageHeight = image->height;
            short minX = static_cast<short>(imageWidth * (p.x - rad));
            short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
            short minY = static_cast<short>(imageHeight * (p.y - rad));
            short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
            short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
            short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
            short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
            short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;

            dim3 blockDim(16, 16);
            dim3 gridDim(((screenMaxX - screenMinX) + blockDim.x - 1) / blockDim.x,
                         ((screenMaxY - screenMinY) + blockDim.y - 1) / blockDim.y);
            kernelRenderSmallCircles<<<gridDim, blockDim>>>(i, imageWidth, imageHeight, screenMinX, screenMinY, screenMaxX, screenMaxY);
            gpuErrchk(cudaDeviceSynchronize());
        }
    } else {
        int imgBlockNum = 32; // tiles per image dimension
        int numImgBlocks = imgBlockNum * imgBlockNum;
        int numElements = numCircles * imgBlockNum * imgBlockNum;
        gpuErrchk(cudaMalloc(&circleImgBlockArray, sizeof(int) * numElements));
        gpuErrchk(cudaMalloc(&circleImgBlockId, sizeof(int) * numElements));

        // Mark, per tile, which circles overlap it (0/1 matrix plus a
        // parallel matrix of 1-based circle ids).
        dim3 blockDim(512, 1);
        dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
        make_circleImgBlockArray<<<gridDim, blockDim>>>(circleImgBlockArray, circleImgBlockId, imageWidth/imgBlockNum, imgBlockNum);

        thrust::device_ptr<int> thrust_arr = thrust::device_pointer_cast(circleImgBlockArray);
        thrust::device_ptr<int> thrust_circleid = thrust::device_pointer_cast(circleImgBlockId);

        // allocate storage for row sums and indices
        thrust::device_vector<int> row_sums(numImgBlocks+1);
        thrust::device_vector<int> row_indices(numImgBlocks);

        // per-tile pair counts: sum each numCircles-wide row of the 0/1 matrix
        thrust::reduce_by_key
            (thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)),
             thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)) + numElements,
             thrust_arr,
             row_indices.begin(),
             row_sums.begin(),
             thrust::equal_to<int>(),
             thrust::plus<int>());
        // sentinel 0 so the exclusive scan's last entry is the grand total
        thrust::fill(thrust::device, row_sums.end() - 1, row_sums.end(), 0);

        // per-tile start offsets into the flat pair list
        thrust::device_vector<int> circleStartAddr(numImgBlocks+1);
        thrust::exclusive_scan(row_sums.begin(), row_sums.end(), circleStartAddr.begin());
        int num_pairs = circleStartAddr[numImgBlocks];

        // the 0/1 matrix is no longer needed (was leaked before)
        gpuErrchk(cudaFree(circleImgBlockArray));

        // Compact the non-zero (1-based) circle ids into the flat
        // per-tile list each block will walk via circleStartAddr.
        thrust::device_vector<int> circleImgBlockList(num_pairs);
        thrust::copy_if(thrust_circleid, thrust_circleid + numElements, circleImgBlockList.begin(), is_not_zero<int>());
        gpuErrchk(cudaFree(circleImgBlockId));

        dim3 gridDim3(imgBlockNum, imgBlockNum);
        dim3 blockDim3(1024/imgBlockNum, 1024/imgBlockNum);
        int *deviceStartAddr = thrust::raw_pointer_cast(circleStartAddr.data());
        int *deviceImgBlockList = thrust::raw_pointer_cast(circleImgBlockList.data());
        kernelRenderCircles<<<gridDim3, blockDim3>>>(deviceImgBlockList, deviceStartAddr);
        gpuErrchk(cudaDeviceSynchronize());
    }
}
|
80e3ce5b1c11a512c7dfee528714cc960a6334ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define MAX_FLOAT (3.4028235e38)
extern "C" {
__global__ void computeVarianceEstimatesKernel(const int h, const int w, float* varianceEstimates, const float* input);
}
// computeVarianceEstimatesKernel -- (HIP device code; this file is
// hipify-generated, so substantive edits belong in the CUDA source)
//
// For each pixel (i, j) of an h x w image, computes box-filter means of
// `input` over square windows of size 3, 5, 7 and 9 (out-of-range taps
// near the borders are skipped) and stores the minimum of those means in
// varianceEstimates. Launch layout: blockIdx.y selects the row,
// blockIdx.x tiles the columns, and the 2D thread block is flattened
// into a 1D column offset below.
__global__ void computeVarianceEstimatesKernel(const int h, const int w, float* varianceEstimates, const float* input) {
const int i = blockIdx.y; // image row handled by this block
const int bj = blockIdx.x; // column-tile index
const int wtj = threadIdx.y;
const int ttj = threadIdx.x;
const int nrThreadsW = min(1024, w); // columns covered per block
const int nrThreadsNrThreadsW = min(32, nrThreadsW);
// flatten the 2D thread index into a column offset within the tile
const int tj = wtj * (1 * nrThreadsNrThreadsW) + ttj;
if (tj < nrThreadsW) {
const int j = bj * (1 * nrThreadsW) + tj;
if (j < w) {
float res = MAX_FLOAT; // running minimum over the four window sizes
for (int filterSize = 3; filterSize <= 9; filterSize += 2) {
const int border = filterSize / 2;
float sum = 0.0;
for (int fi = 0; fi < filterSize; fi++) {
for (int fj = 0; fj < filterSize; fj++) {
const int row = i + fi - border;
const int col = j + fj - border;
if (row >= 0 && row < h) {
if (col >= 0 && col < w) {
sum += input[col + row * (1 * w)];
}
}
}
}
// divides by the full window area even when border taps were skipped
sum /= (float) filterSize * filterSize;
if (sum < res) {
res = sum;
}
}
varianceEstimates[j + i * (1 * w)] = res;
}
}
}
| 80e3ce5b1c11a512c7dfee528714cc960a6334ea.cu | // fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define MAX_FLOAT (3.4028235e38)
extern "C" {
__global__ void computeVarianceEstimatesKernel(const int h, const int w, float* varianceEstimates, const float* input);
}
// Computes, for each pixel (i, j), the minimum mean over square windows of
// size 3x3, 5x5, 7x7 and 9x9 centred on the pixel, storing the result in
// varianceEstimates (row-major, h x w).
// Launch layout: blockIdx.y = image row, blockIdx.x = column tile; the 2D
// thread index is flattened into one column offset per block.
__global__ void computeVarianceEstimatesKernel(const int h, const int w, float* varianceEstimates, const float* input) {
    const int i = blockIdx.y;   // image row
    const int bj = blockIdx.x;  // column-tile index
    const int wtj = threadIdx.y;
    const int ttj = threadIdx.x;
    const int nrThreadsW = min(1024, w);                  // columns per block
    const int nrThreadsNrThreadsW = min(32, nrThreadsW);  // threads along x per y-slice
    // Flatten (threadIdx.y, threadIdx.x) into a single per-block column offset.
    const int tj = wtj * (1 * nrThreadsNrThreadsW) + ttj;
    if (tj < nrThreadsW) {
        const int j = bj * (1 * nrThreadsW) + tj;  // image column
        if (j < w) {
            float res = MAX_FLOAT;
            for (int filterSize = 3; filterSize <= 9; filterSize += 2) {
                const int border = filterSize / 2;
                float sum = 0.0;
                for (int fi = 0; fi < filterSize; fi++) {
                    for (int fj = 0; fj < filterSize; fj++) {
                        const int row = i + fi - border;
                        const int col = j + fj - border;
                        if (row >= 0 && row < h) {
                            if (col >= 0 && col < w) {
                                sum += input[col + row * (1 * w)];
                            }
                        }
                    }
                }
                // Divides by the full window area even when the window is
                // clipped at the image border (border means are biased low).
                sum /= (float) filterSize * filterSize;
                if (sum < res) {
                    res = sum;
                }
            }
            varianceEstimates[j + i * (1 * w)] = res;
        }
    }
}
|
5da4da815488684b62aeb0ea0dbd6df15d4e7381.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
// Converts an interleaved 3-channel image to grayscale with fixed-point
// weights (307, 604, 113)/1024 applied to the three channels in memory
// order. One thread per output pixel; the grid may overhang the image.
__global__ void grayscale(unsigned char * data_rgb, unsigned char * data_gray, std::size_t rows, std::size_t cols)
{
    auto x = blockIdx.x * blockDim.x + threadIdx.x;  // column
    auto y = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (x >= cols || y >= rows)
        return;  // guard against out-of-image threads
    auto const pixel = y * cols + x;
    auto const chan = data_rgb + 3 * pixel;
    data_gray[pixel] = (307 * chan[0] + 604 * chan[1] + 113 * chan[2]) / 1024;
}
// Applies a 5x5 Laplacian-of-Gaussian filter to a grayscale image and
// writes the clamped magnitude of the response to data_out.
// Weights: centre 16, 4-neighbours -2, remaining taps at distance <= 2
// get -1. Pixels closer than 2 to any edge are left untouched (the host
// output buffer is expected to be zero-initialised).
__global__ void laplacian_of_gaussian(unsigned char const * const data_gray, unsigned char * const data_out, std::size_t rows, std::size_t cols)
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    auto j = blockIdx.y * blockDim.y + threadIdx.y;
    // Fixed off-by-one: i == 2 / j == 2 have a full 5x5 neighbourhood and
    // were previously skipped (the old test used `i > 2` instead of `>= 2`).
    if( i >= 2 && i < (cols - 2) && j >= 2 && j < (rows - 2))
    {
        // Centre tap, weight 16
        auto result = data_gray[(j * cols + i)] * 16
            // 4-connected neighbours, weight -2
            + ( data_gray[((j-1) * cols + i)] + data_gray[((j+1) * cols + i)] + data_gray[(j * cols + (i-1))] + data_gray[(j * cols + (i+1))] ) * -2
            // Distance-2 cross taps and inner diagonals, weight -1
            + ( data_gray[((j-2) * cols + i)] + data_gray[((j+2) * cols + i)] + data_gray[(j * cols + (i-2))] + data_gray[(j * cols + (i+2))]
            + data_gray[((j-1) * cols + (i-1))] + data_gray[((j-1) * cols + (i+1))] + data_gray[((j+1) * cols + (i-1))] + data_gray[((j+1) * cols + (i+1))] ) * -1;
        // Square so the response is non-negative, then clamp so the final
        // sqrt stays within [0, 255] (was a redundant assignment in a ternary).
        result = result * result;
        if (result > 255 * 255)
            result = 255 * 255;
        data_out[ j * cols + i ] = sqrt((float) result);
    }
}
// Usage: prog <image path> [square thread-block edge, default 32]
// Loads an image, converts it to grayscale on the GPU, applies a
// Laplacian-of-Gaussian edge filter and writes the result to outCudaV1.jpg.
int main(int argc, char** argv)
{
    if(argc >= 2){
        // Optional second argument: square thread-block edge length.
        int threadSize = 32;
        if(argc == 3){
            threadSize = atoi(argv[2]);
            // Guard against non-numeric input and blocks over the
            // 1024-threads-per-block limit (33*33 would exceed it).
            if(threadSize <= 0 || threadSize > 32){
                std::cerr << "Invalid block size '" << argv[2] << "', using 32" << std::endl;
                threadSize = 32;
            }
        }
        // Events used to time the two kernels back to back.
        hipEvent_t start, stop;
        hipEventCreate(&start);
        hipEventCreate(&stop);
        // Load the input image in its original channel layout.
        cv::Mat image_in = cv::imread(argv[1], cv::IMREAD_UNCHANGED);
        if(image_in.empty()){
            // Fixed: previously an unreadable file crashed later in hipMemcpy.
            std::cerr << "Could not read image " << argv[1] << std::endl;
            hipEventDestroy(start);
            hipEventDestroy(stop);
            return 1;
        }
        auto data_rgb = image_in.data;
        auto rows = image_in.rows;
        auto cols = image_in.cols;
        std::cout << "rows = " << rows << " columns = " << cols << std::endl;
        // Host output buffer, wrapped by a single-channel cv::Mat (no copy).
        std::vector<unsigned char> out(rows * cols);
        cv::Mat image_out(rows, cols, CV_8UC1, out.data());
        // Device buffers: interleaved 3-channel input, grayscale, filtered output.
        unsigned char * data_rgb_device;
        unsigned char * data_gray_device;
        unsigned char * data_out_device;
        hipMalloc(&data_rgb_device, 3 * rows * cols); // 1 pixel = 3 colour bytes
        hipMalloc(&data_gray_device, rows * cols);
        hipMalloc(&data_out_device, rows * cols);
        hipMemcpy(data_rgb_device, data_rgb, 3 * rows * cols, hipMemcpyHostToDevice );
        dim3 threads(threadSize, threadSize );
        // Ceil-division grid so the whole image is covered.
        dim3 blocks(( cols -1 ) / threads.x + 1 , ( rows - 1) / threads.y + 1);
        std::cout << "Nombre de threads = " << threads.x << " " << threads.y << std::endl;
        std::cout << "Nombre de blocks = " << blocks.x << " " << blocks.y << std::endl;
        hipEventRecord(start);
        hipLaunchKernelGGL(( grayscale), dim3(blocks) , dim3(threads) , 0, 0, data_rgb_device, data_gray_device, rows, cols);
        hipLaunchKernelGGL(( laplacian_of_gaussian), dim3(blocks) , dim3(threads) , 0, 0, data_gray_device, data_out_device, rows, cols);
        hipEventRecord(stop);
        hipDeviceSynchronize();
        // Surface launch/execution errors (this check was commented out).
        auto err = hipGetLastError();
        if( err != hipSuccess )
        {
            printf("Errors found :\n %s", hipGetErrorString(err));
        }
        // Copy the filtered image back to the host.
        hipMemcpy(out.data(), data_out_device, rows * cols, hipMemcpyDeviceToHost );
        // Read back the elapsed kernel time.
        hipEventSynchronize(stop);
        float milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        printf("Execution time : %f\n",milliseconds);
        cv::imwrite( "outCudaV1.jpg", image_out);
        // Release device memory and timing events (events previously leaked).
        hipFree(data_rgb_device);
        hipFree(data_gray_device);
        hipFree(data_out_device);
        hipEventDestroy(start);
        hipEventDestroy(stop);
    }
    return 0;
}
| 5da4da815488684b62aeb0ea0dbd6df15d4e7381.cu | #include <opencv2/opencv.hpp>
#include <vector>
// Converts an interleaved 3-channel image to grayscale on the GPU with
// fixed-point weights (307, 604, 113)/1024 applied to the three channels
// in memory order. One thread per output pixel.
__global__ void grayscale(unsigned char * data_rgb, unsigned char * data_gray, std::size_t rows, std::size_t cols)
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;  // column
    auto j = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if( i < cols && j < rows )  // guard: the grid may overhang the image
    {
        data_gray[ j * cols + i ] = (
            307 * data_rgb[ 3 * (j * cols + i) ]
            + 604 * data_rgb[ 3 * (j * cols + i) + 1 ]
            + 113 * data_rgb[ 3 * (j * cols + i) + 2 ]
        ) / 1024;
    }
}
// Applies a 5x5 Laplacian-of-Gaussian filter to a grayscale image and
// writes the clamped magnitude of the response to data_out.
// Weights: centre 16, 4-neighbours -2, remaining taps at distance <= 2
// get -1. Pixels closer than 2 to any edge are left untouched (the host
// output buffer is expected to be zero-initialised).
__global__ void laplacian_of_gaussian(unsigned char const * const data_gray, unsigned char * const data_out, std::size_t rows, std::size_t cols)
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    auto j = blockIdx.y * blockDim.y + threadIdx.y;
    // Fixed off-by-one: i == 2 / j == 2 have a full 5x5 neighbourhood and
    // were previously skipped (the old test used `i > 2` instead of `>= 2`).
    if( i >= 2 && i < (cols - 2) && j >= 2 && j < (rows - 2))
    {
        // Centre tap, weight 16
        auto result = data_gray[(j * cols + i)] * 16
            // 4-connected neighbours, weight -2
            + ( data_gray[((j-1) * cols + i)] + data_gray[((j+1) * cols + i)] + data_gray[(j * cols + (i-1))] + data_gray[(j * cols + (i+1))] ) * -2
            // Distance-2 cross taps and inner diagonals, weight -1
            + ( data_gray[((j-2) * cols + i)] + data_gray[((j+2) * cols + i)] + data_gray[(j * cols + (i-2))] + data_gray[(j * cols + (i+2))]
            + data_gray[((j-1) * cols + (i-1))] + data_gray[((j-1) * cols + (i+1))] + data_gray[((j+1) * cols + (i-1))] + data_gray[((j+1) * cols + (i+1))] ) * -1;
        // Square so the response is non-negative, then clamp so the final
        // sqrt stays within [0, 255] (was a redundant assignment in a ternary).
        result = result * result;
        if (result > 255 * 255)
            result = 255 * 255;
        data_out[ j * cols + i ] = sqrt((float) result);
    }
}
// Usage: prog <image path> [square thread-block edge, default 32]
// Loads an image, converts it to grayscale on the GPU, applies a
// Laplacian-of-Gaussian edge filter and writes the result to outCudaV1.jpg.
int main(int argc, char** argv)
{
    if(argc >= 2){
        // Optional second argument: square thread-block edge length.
        int threadSize = 32;
        if(argc == 3){
            threadSize = atoi(argv[2]);
            // Guard against non-numeric input and blocks over the
            // 1024-threads-per-block limit (33*33 would exceed it).
            if(threadSize <= 0 || threadSize > 32){
                std::cerr << "Invalid block size '" << argv[2] << "', using 32" << std::endl;
                threadSize = 32;
            }
        }
        // Events used to time the two kernels back to back.
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        // Load the input image in its original channel layout.
        cv::Mat image_in = cv::imread(argv[1], cv::IMREAD_UNCHANGED);
        if(image_in.empty()){
            // Fixed: previously an unreadable file crashed later in cudaMemcpy.
            std::cerr << "Could not read image " << argv[1] << std::endl;
            cudaEventDestroy(start);
            cudaEventDestroy(stop);
            return 1;
        }
        auto data_rgb = image_in.data;
        auto rows = image_in.rows;
        auto cols = image_in.cols;
        std::cout << "rows = " << rows << " columns = " << cols << std::endl;
        // Host output buffer, wrapped by a single-channel cv::Mat (no copy).
        std::vector<unsigned char> out(rows * cols);
        cv::Mat image_out(rows, cols, CV_8UC1, out.data());
        // Device buffers: interleaved 3-channel input, grayscale, filtered output.
        unsigned char * data_rgb_device;
        unsigned char * data_gray_device;
        unsigned char * data_out_device;
        cudaMalloc(&data_rgb_device, 3 * rows * cols); // 1 pixel = 3 colour bytes
        cudaMalloc(&data_gray_device, rows * cols);
        cudaMalloc(&data_out_device, rows * cols);
        cudaMemcpy(data_rgb_device, data_rgb, 3 * rows * cols, cudaMemcpyHostToDevice );
        dim3 threads(threadSize, threadSize );
        // Ceil-division grid so the whole image is covered.
        dim3 blocks(( cols -1 ) / threads.x + 1 , ( rows - 1) / threads.y + 1);
        std::cout << "Nombre de threads = " << threads.x << " " << threads.y << std::endl;
        std::cout << "Nombre de blocks = " << blocks.x << " " << blocks.y << std::endl;
        cudaEventRecord(start);
        grayscale<<< blocks , threads >>>(data_rgb_device, data_gray_device, rows, cols);
        laplacian_of_gaussian<<< blocks , threads >>>(data_gray_device, data_out_device, rows, cols);
        cudaEventRecord(stop);
        cudaDeviceSynchronize();
        // Surface launch/execution errors (this check was commented out).
        auto err = cudaGetLastError();
        if( err != cudaSuccess )
        {
            printf("Errors found :\n %s", cudaGetErrorString(err));
        }
        // Copy the filtered image back to the host.
        cudaMemcpy(out.data(), data_out_device, rows * cols, cudaMemcpyDeviceToHost );
        // Read back the elapsed kernel time.
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        printf("Execution time : %f\n",milliseconds);
        cv::imwrite( "outCudaV1.jpg", image_out);
        // Release device memory and timing events (events previously leaked).
        cudaFree(data_rgb_device);
        cudaFree(data_gray_device);
        cudaFree(data_out_device);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    return 0;
}
|
9db9d922060b29c1caa675fe7664fe53da1b9e16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
// implemented algorithm is GPU adaptation of algorithm described in following article:
// "MergeShuffle: A Very Fast, Parallel Random Permutation Algorithm", https://arxiv.org/abs/1508.03167
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <numeric>
#include <execution/Threads.h>
#include <helpers/ShapeUtils.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
// In-place Fisher-Yates shuffle of one contiguous chunk of x per block.
// Block b handles elements [len*b >> power, len*(b+1) >> power); the chunk
// is staged in dynamic shared memory, shuffled serially by thread 0 (so the
// RNG draw sequence stays deterministic), then written back.
// Assumes blockDim.x >= lenPerBlock and one shared element per thread.
template <typename T>
static __global__ void fisherYatesCuda(sd::graph::RandomGenerator* rng, void* vx, const Nd4jLong ews, const Nd4jLong len, const int power) {
    T* x = reinterpret_cast<T*>(vx);
    // NOTE: `temp` is a shared scalar T (the * binds only to shmem); it is
    // touched only by thread 0, so there is no race.
    __shared__ T* shmem, temp;
    __shared__ Nd4jLong ind, blockOffset, lenPerBlock;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char sharedMemory[];
        shmem = reinterpret_cast<T*>(sharedMemory);
        // Chunk boundaries as fixed-point fractions of len (2^power chunks).
        blockOffset = (len * blockIdx.x) >> power;
        lenPerBlock = ((len * (blockIdx.x + 1)) >> power) - blockOffset;
        ind = blockOffset;  // per-block offset into the RNG stream
    }
    __syncthreads();
    // copy from global memory to shared memory (one element per thread)
    if(threadIdx.x < lenPerBlock)
        shmem[threadIdx.x] = x[(blockOffset + threadIdx.x) * ews];
    __syncthreads();
    // *** apply Fisher-Yates shuffle to lenPerBlock number of elements,
    // serially on thread 0
    if (threadIdx.x == 0) {
        for(Nd4jLong i = lenPerBlock - 1; i > 0; --i) {
            const Nd4jLong j = rng->relativeLong(ind++) % (i + 1);  // uniform in [0, i]
            if(i != j) {
                temp = shmem[i];
                shmem[i] = shmem[j];
                shmem[j] = temp;
            }
        }
    }
    __syncthreads();
    // copy from shared memory back to global memory
    if(threadIdx.x < lenPerBlock)
        x[(blockOffset + threadIdx.x) * ews] = shmem[threadIdx.x];
}
// One merge round of the MergeShuffle algorithm: block b merges the pair of
// adjacent, already-shuffled chunks starting at chunk index b << iterNum
// into one shuffled span. `iterNum` is the 1-based merge round and `power`
// is log2 of the total chunk count; chunk boundaries are fixed-point
// fractions of len. The whole merge is executed serially by thread 0 of
// each block — the remaining threads are idle.
template <typename T>
static __global__ void mergeShuffleCuda(sd::graph::RandomGenerator* rng, void* vx, const Nd4jLong ews, const Nd4jLong len, const int power, const Nd4jLong iterNum) {
    T* x = reinterpret_cast<T*>(vx);
    __shared__ Nd4jLong ind, blockOffset, factor, beg, mid, totLen, iterExp;
    // *** apply mergeShuffle algorithm
    if(threadIdx.x == 0) {
        factor = blockIdx.x << iterNum;  // first chunk index of this pair
        iterExp = 1 << (iterNum - 1);    // chunks per half at this round
        blockOffset = (len * factor) >> power;
        mid = ((len * (factor + iterExp)) >> power) - blockOffset; // middle
        totLen = ((len * (factor + 2*iterExp)) >> power) - blockOffset;
        // Disjoint region of the RNG stream for this round and block.
        ind = iterNum * len + blockOffset;
        beg = 0; // beginning
        // printf("m %lld, blockIdx.x %lld, factor %lld, blockOffset %lld, mid %lld, totLen %lld \n", m,k,factor,blockOffset,mid,totLen);
        // Random merge: a coin flip decides whether the element at `beg`
        // keeps its place or is swapped with the head of the second half.
        while (true) {
            if(rng->relativeLong(ind++) % 2) {
                if(mid == totLen)
                    break;
                math::nd4j_swap<T>(x[(blockOffset + beg) * ews], x[(blockOffset + mid++) * ews]);
            } else {
                if(beg == mid)
                    break;
            }
            ++beg;
        }
        // Fisher-Yates over the unconsumed tail to finish the merge.
        while (beg < totLen) {
            const Nd4jLong e = rng->relativeLong(ind++) % (beg + 1);
            if(beg != e)
                math::nd4j_swap<T>(x[(blockOffset + beg) * ews], x[(blockOffset + e) * ews]);
            ++beg;
        }
    }
}
//////////////////////////////////////////////////////////////////////////
// Host-side in-place Fisher-Yates shuffle of `len` elements with element
// stride `ews`; `ind` is the starting offset into the RNG stream, keeping
// the draw sequence deterministic for a given generator state.
template <typename T>
static void fisherYates(sd::graph::RandomGenerator& rng, T* buff, const Nd4jLong& len, const Nd4jLong& ews, Nd4jLong ind) {
    for (Nd4jLong pos = len - 1; pos > 0; --pos) {
        // pick a swap partner uniformly from [0, pos]
        const Nd4jLong partner = rng.relativeLong(ind++) % (pos + 1);
        if (partner != pos)
            math::nd4j_swap<T>(buff[pos * ews], buff[partner * ews]);
    }
}
//////////////////////////////////////////////////////////////////////////
// Shuffles `input` into `output` (or in place when isInplace is true).
// Three regimes:
//  * scalar / length-1 first dimension: nothing to shuffle, just copy;
//  * flat vector: GPU MergeShuffle — a per-block Fisher-Yates pass over
//    2^power chunks followed by log2(blocksPerGrid) pairwise merge rounds;
//  * higher rank: shuffle the sub-arrays along dimension 0 on the host.
template <typename T>
static void randomShuffle_(sd::LaunchContext* context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) {
    const int firstDim = input.sizeAt(0);
    int temp;
    if(input.lengthOf() == 1 || firstDim == 1) {
        // Nothing to permute.
        if(!isInplace)
            output.assign(input);
    }
    else if (shape::isCommonVector(input.shapeInfo(), temp)) {
        // Vector case: shuffle the destination array's buffer on the GPU.
        NDArray* arr = &input;
        if (!isInplace) {
            output.assign(input);
            arr = &output;
        }
        const Nd4jLong len = arr->lengthOf();
        const int threadsPerBlock = MAX_NUM_THREADS;
        // Smallest power such that each of the 2^power chunks fits in one block.
        int power = 0;
        while ((len >> power) > threadsPerBlock)
            ++power;
        const int blocksPerGrid = 1 << power;
        // One staged element per thread plus a little slack.
        const int sharedMem = threadsPerBlock * input.sizeOfT() + 256;
        PointersManager manager(context, "NDArray::randomShuffle cuda");
        // Device-side copy of the generator shared by both kernels.
        sd::graph::RandomGenerator* pRng = reinterpret_cast<sd::graph::RandomGenerator*>(manager.replicatePointer(&rng, sizeof(sd::graph::RandomGenerator)));
        NDArray::prepareSpecialUse({arr}, {arr});
        hipLaunchKernelGGL(( fisherYatesCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *context->getCudaStream(), pRng, arr->specialBuffer(), arr->ews(), len, power);
        // Merge rounds: halve the number of blocks each iteration.
        for (Nd4jLong j = 1, i = 1; j < blocksPerGrid; j += j, ++i)
            hipLaunchKernelGGL(( mergeShuffleCuda<T>), dim3(blocksPerGrid/(2*j)), dim3(threadsPerBlock), 256, *context->getCudaStream(), pRng, arr->specialBuffer(), arr->ews(), len, power, i);
        NDArray::registerSpecialUse({arr}, {arr});
        manager.synchronize();
        // Advance the host generator past the draws consumed on device
        // (presumably to keep host/device RNG streams aligned — see kernels).
        rng.rewindH((len + 1) * power);
    }
    else {
        // Rank > 1: permute the dim-0 sub-arrays.
        auto dimsToExclude = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
        if(isInplace) {
            auto subArrsList = input.allTensorsAlongDimension(dimsToExclude);
            // Fisher-Yates shuffle of sub-arrays, swapping buffers directly.
            for(int i = firstDim - 1; i > 0; --i) {
                const int j = rng.relativeInt(i) % (i + 1);
                if(i != j)
                    subArrsList.at(i)->swapUnsafe(*subArrsList.at(j));
            }
        }
        else {
            auto subArrsListIn = input.allTensorsAlongDimension(dimsToExclude);
            auto subArrsListOut = output.allTensorsAlongDimension(dimsToExclude);
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0); // 0,1,2,3, ... firstDim-1
            // shuffle indices, then scatter the sub-arrays in parallel
            fisherYates<int>(rng, indices.data(), firstDim, 1, 0);
            auto func = PRAGMA_THREADS_FOR {
                for (auto i = start; i < stop; ++i)
                    subArrsListOut.at(i)->assign(subArrsListIn.at(indices[i]));
            };
            samediff::Threads::parallel_for(func, 0, firstDim);
        }
        rng.rewindH(firstDim-1);
    }
}
/////////////////////////////////////////////////////////////////////////
// Type-dispatching entry point: routes to randomShuffle_<T> according to
// the input array's data type.
void randomShuffle(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (sd::LaunchContext* context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);
}
}
} | 9db9d922060b29c1caa675fe7664fe53da1b9e16.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
// implemented algorithm is GPU adaptation of algorithm described in following article:
// "MergeShuffle: A Very Fast, Parallel Random Permutation Algorithm", https://arxiv.org/abs/1508.03167
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <numeric>
#include <execution/Threads.h>
#include <helpers/ShapeUtils.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
// In-place Fisher-Yates shuffle of one contiguous chunk of x per block.
// Block b handles elements [len*b >> power, len*(b+1) >> power); the chunk
// is staged in dynamic shared memory, shuffled serially by thread 0 (so the
// RNG draw sequence stays deterministic), then written back.
// Assumes blockDim.x >= lenPerBlock and one shared element per thread.
template <typename T>
static __global__ void fisherYatesCuda(sd::graph::RandomGenerator* rng, void* vx, const Nd4jLong ews, const Nd4jLong len, const int power) {
    T* x = reinterpret_cast<T*>(vx);
    // NOTE: `temp` is a shared scalar T (the * binds only to shmem); it is
    // touched only by thread 0, so there is no race.
    __shared__ T* shmem, temp;
    __shared__ Nd4jLong ind, blockOffset, lenPerBlock;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char sharedMemory[];
        shmem = reinterpret_cast<T*>(sharedMemory);
        // Chunk boundaries as fixed-point fractions of len (2^power chunks).
        blockOffset = (len * blockIdx.x) >> power;
        lenPerBlock = ((len * (blockIdx.x + 1)) >> power) - blockOffset;
        ind = blockOffset;  // per-block offset into the RNG stream
    }
    __syncthreads();
    // copy from global memory to shared memory (one element per thread)
    if(threadIdx.x < lenPerBlock)
        shmem[threadIdx.x] = x[(blockOffset + threadIdx.x) * ews];
    __syncthreads();
    // *** apply Fisher-Yates shuffle to lenPerBlock number of elements,
    // serially on thread 0
    if (threadIdx.x == 0) {
        for(Nd4jLong i = lenPerBlock - 1; i > 0; --i) {
            const Nd4jLong j = rng->relativeLong(ind++) % (i + 1);  // uniform in [0, i]
            if(i != j) {
                temp = shmem[i];
                shmem[i] = shmem[j];
                shmem[j] = temp;
            }
        }
    }
    __syncthreads();
    // copy from shared memory back to global memory
    if(threadIdx.x < lenPerBlock)
        x[(blockOffset + threadIdx.x) * ews] = shmem[threadIdx.x];
}
// One merge round of the MergeShuffle algorithm: block b merges the pair of
// adjacent, already-shuffled chunks starting at chunk index b << iterNum
// into one shuffled span. `iterNum` is the 1-based merge round and `power`
// is log2 of the total chunk count; chunk boundaries are fixed-point
// fractions of len. The whole merge is executed serially by thread 0 of
// each block — the remaining threads are idle.
template <typename T>
static __global__ void mergeShuffleCuda(sd::graph::RandomGenerator* rng, void* vx, const Nd4jLong ews, const Nd4jLong len, const int power, const Nd4jLong iterNum) {
    T* x = reinterpret_cast<T*>(vx);
    __shared__ Nd4jLong ind, blockOffset, factor, beg, mid, totLen, iterExp;
    // *** apply mergeShuffle algorithm
    if(threadIdx.x == 0) {
        factor = blockIdx.x << iterNum;  // first chunk index of this pair
        iterExp = 1 << (iterNum - 1);    // chunks per half at this round
        blockOffset = (len * factor) >> power;
        mid = ((len * (factor + iterExp)) >> power) - blockOffset; // middle
        totLen = ((len * (factor + 2*iterExp)) >> power) - blockOffset;
        // Disjoint region of the RNG stream for this round and block.
        ind = iterNum * len + blockOffset;
        beg = 0; // beginning
        // printf("m %lld, blockIdx.x %lld, factor %lld, blockOffset %lld, mid %lld, totLen %lld \n", m,k,factor,blockOffset,mid,totLen);
        // Random merge: a coin flip decides whether the element at `beg`
        // keeps its place or is swapped with the head of the second half.
        while (true) {
            if(rng->relativeLong(ind++) % 2) {
                if(mid == totLen)
                    break;
                math::nd4j_swap<T>(x[(blockOffset + beg) * ews], x[(blockOffset + mid++) * ews]);
            } else {
                if(beg == mid)
                    break;
            }
            ++beg;
        }
        // Fisher-Yates over the unconsumed tail to finish the merge.
        while (beg < totLen) {
            const Nd4jLong e = rng->relativeLong(ind++) % (beg + 1);
            if(beg != e)
                math::nd4j_swap<T>(x[(blockOffset + beg) * ews], x[(blockOffset + e) * ews]);
            ++beg;
        }
    }
}
//////////////////////////////////////////////////////////////////////////
// Fisher-Yates shuffle
// Host-side in-place shuffle of `len` elements with element stride `ews`.
// `ind` is the starting offset into the RNG stream, so the sequence of
// draws is deterministic for a given generator state.
template <typename T>
static void fisherYates(sd::graph::RandomGenerator& rng, T* buff, const Nd4jLong& len, const Nd4jLong& ews, Nd4jLong ind) {
    for(Nd4jLong i = len-1; i > 0; --i) {
        const Nd4jLong j = rng.relativeLong(ind++) % (i + 1);  // uniform in [0, i]
        if(i != j)
            math::nd4j_swap<T>(buff[i*ews], buff[j*ews]);
    }
}
//////////////////////////////////////////////////////////////////////////
// Shuffles `input` into `output` (or in place when isInplace is true).
// Three regimes:
//  * scalar / length-1 first dimension: nothing to shuffle, just copy;
//  * flat vector: GPU MergeShuffle — a per-block Fisher-Yates pass over
//    2^power chunks followed by log2(blocksPerGrid) pairwise merge rounds;
//  * higher rank: shuffle the sub-arrays along dimension 0 on the host.
template <typename T>
static void randomShuffle_(sd::LaunchContext* context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) {
    const int firstDim = input.sizeAt(0);
    int temp;
    if(input.lengthOf() == 1 || firstDim == 1) {
        // Nothing to permute.
        if(!isInplace)
            output.assign(input);
    }
    else if (shape::isCommonVector(input.shapeInfo(), temp)) {
        // Vector case: shuffle the destination array's buffer on the GPU.
        NDArray* arr = &input;
        if (!isInplace) {
            output.assign(input);
            arr = &output;
        }
        const Nd4jLong len = arr->lengthOf();
        const int threadsPerBlock = MAX_NUM_THREADS;
        // Smallest power such that each of the 2^power chunks fits in one block.
        int power = 0;
        while ((len >> power) > threadsPerBlock)
            ++power;
        const int blocksPerGrid = 1 << power;
        // One staged element per thread plus a little slack.
        const int sharedMem = threadsPerBlock * input.sizeOfT() + 256;
        PointersManager manager(context, "NDArray::randomShuffle cuda");
        // Device-side copy of the generator shared by both kernels.
        sd::graph::RandomGenerator* pRng = reinterpret_cast<sd::graph::RandomGenerator*>(manager.replicatePointer(&rng, sizeof(sd::graph::RandomGenerator)));
        NDArray::prepareSpecialUse({arr}, {arr});
        fisherYatesCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *context->getCudaStream()>>>(pRng, arr->specialBuffer(), arr->ews(), len, power);
        // Merge rounds: halve the number of blocks each iteration.
        for (Nd4jLong j = 1, i = 1; j < blocksPerGrid; j += j, ++i)
            mergeShuffleCuda<T><<<blocksPerGrid/(2*j), threadsPerBlock, 256, *context->getCudaStream()>>>(pRng, arr->specialBuffer(), arr->ews(), len, power, i);
        NDArray::registerSpecialUse({arr}, {arr});
        manager.synchronize();
        // Advance the host generator past the draws consumed on device
        // (presumably to keep host/device RNG streams aligned — see kernels).
        rng.rewindH((len + 1) * power);
    }
    else {
        // Rank > 1: permute the dim-0 sub-arrays.
        auto dimsToExclude = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
        if(isInplace) {
            auto subArrsList = input.allTensorsAlongDimension(dimsToExclude);
            // Fisher-Yates shuffle of sub-arrays, swapping buffers directly.
            for(int i = firstDim - 1; i > 0; --i) {
                const int j = rng.relativeInt(i) % (i + 1);
                if(i != j)
                    subArrsList.at(i)->swapUnsafe(*subArrsList.at(j));
            }
        }
        else {
            auto subArrsListIn = input.allTensorsAlongDimension(dimsToExclude);
            auto subArrsListOut = output.allTensorsAlongDimension(dimsToExclude);
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0); // 0,1,2,3, ... firstDim-1
            // shuffle indices, then scatter the sub-arrays in parallel
            fisherYates<int>(rng, indices.data(), firstDim, 1, 0);
            auto func = PRAGMA_THREADS_FOR {
                for (auto i = start; i < stop; ++i)
                    subArrsListOut.at(i)->assign(subArrsListIn.at(indices[i]));
            };
            samediff::Threads::parallel_for(func, 0, firstDim);
        }
        rng.rewindH(firstDim-1);
    }
}
/////////////////////////////////////////////////////////////////////////
// Type-dispatching entry point: routes to randomShuffle_<T> according to
// the input array's data type.
void randomShuffle(sd::LaunchContext * context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (sd::LaunchContext* context, NDArray& input, NDArray& output, sd::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);
}
}
} |
dfedae851863d804019c3ed7cc2c71d0a1014266.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file heterogeneous_contraction.cu
*
* @brief Contracts three tensor network files on two gpus
* and one cpu simultaneously
*
*/
#include <iostream>
#include "CudaTensor.hpp"
#include "PathInfo.hpp"
#include "TaskBasedContractor.hpp"
#include "Tensor.hpp"
#include "TensorNetwork.hpp"
#include "TensorNetworkIO.hpp"
#include <hip/hip_complex.h>
#include <taskflow/cudaflow.hpp>
using namespace Jet;
// Per-device state for a cudaFlow-based tensor-network contraction: owns
// the leaf/intermediate tensors, the per-step cuTENSOR contraction plans,
// the captured kernel tasks (used to wire inter-step dependencies), and
// the final tensor copied back to the host. Must outlive the taskflow run.
template <typename T, int device = 0> struct CudaflowContractionTask {
    std::vector<std::unique_ptr<CudaTensor<T, device>>> tensors;  // indexed by path node id (leaves first)
    std::vector<typename CudaTensor<T, device>::CudaContractionPlan> plans;  // one per contraction step
    std::vector<tf::cudaTask> kernel_tasks;  // capture-time tasks, index = step
    std::vector<T> result;                   // final tensor after the D2H copy
};
// Builds the GPU-side contraction of one tensor network on `device`:
// materialises leaf and intermediate CudaTensors, precomputes one cuTENSOR
// contraction plan per path step, then records a cudaFlow capture task in
// `taskflow` that replays every pairwise contraction with the proper
// inter-step dependencies and finally copies the result tensor to host.
// All buffers live in `gpu_task`, which must outlive taskflow execution.
// NOTE(review): this hipified variant hard-codes hipComplex for the tensor
// element type instead of using the template parameter T — confirm intended.
template <typename T, int device = 0>
void AddCudaContractionToTaskflow(
    const TensorNetwork<CudaTensor<T, device>> &tn,
    const PathInfo &path_info, tf::Taskflow &taskflow,
    CudaflowContractionTask<T, device> &gpu_task)
{
    auto &tensors = gpu_task.tensors;
    auto &plans = gpu_task.plans;
    auto &result = gpu_task.result;
    auto &kernel_tasks = gpu_task.kernel_tasks;
    const auto &path_node_info = path_info.GetSteps();
    const auto &path = path_info.GetPath();
    const auto &nodes = tn.GetNodes();
    size_t num_leafs = nodes.size();
    // One slot per path node: leaves occupy [0, num_leafs), intermediates follow.
    tensors.resize(path_node_info.size());
    plans.resize(path.size());
    for (size_t i = 0; i < path.size(); i++) {
        const PathStepInfo &pnia = path_node_info[path[i].first];   // lhs operand
        const PathStepInfo &pnib = path_node_info[path[i].second];  // rhs operand
        const PathStepInfo &pnic = path_node_info[num_leafs + i];   // product
        // Intermediates (id >= num_leafs) are allocated empty from their
        // index/shape metadata; leaves are copied from the network.
        if (pnia.id >= num_leafs) {
            tensors[path[i].first] =
                std::make_unique<CudaTensor<hipComplex, device>>(
                    CudaTensor<hipComplex, device>(pnia.tensor_indices,
                                                   pnia.shape));
        }
        else {
            tensors[path[i].first] =
                std::make_unique<CudaTensor<hipComplex, device>>(
                    CudaTensor<hipComplex, device>(
                        tn.GetNodes()[pnia.id].tensor));
        }
        if (pnib.id >= num_leafs) {
            tensors[path[i].second] =
                std::make_unique<CudaTensor<hipComplex, device>>(
                    CudaTensor<hipComplex, device>(pnib.tensor_indices,
                                                   pnib.shape));
        }
        else {
            tensors[path[i].second] =
                std::make_unique<CudaTensor<hipComplex, device>>(
                    CudaTensor<hipComplex, device>(
                        tn.GetNodes()[pnib.id].tensor));
        }
        // Output tensor and the cuTENSOR plan for this contraction step.
        tensors[num_leafs + i] =
            std::make_unique<CudaTensor<hipComplex, device>>(
                CudaTensor<hipComplex, device>(pnic.tensor_indices, pnic.shape));
        CudaTensor<hipComplex, device>::GetCudaContractionPlan(
            plans[i], *tensors[path[i].first], *tensors[path[i].second],
            *tensors[num_leafs + i]);
    }
    // Record the whole contraction as one cudaFlow capture task pinned to
    // `device`. The lambda runs at capture time; tensors/plans are reached
    // through gpu_task references, which is why gpu_task must stay alive.
    tf::Task task = taskflow.emplace_on(
        [&,path,path_node_info,num_leafs](tf::cudaFlowCapturer &capturer) {
            for (int i = 0; i < path.size(); i++) {
                const PathStepInfo &pnia = path_node_info[path[i].first];
                const PathStepInfo &pnib = path_node_info[path[i].second];
                const PathStepInfo &pnic = path_node_info[num_leafs + i];
                auto tensor_a = tensors[path[i].first]->GetData();
                auto tensor_b = tensors[path[i].second]->GetData();
                auto tensor_c = tensors[num_leafs + i]->GetData();
                auto &c_plan = plans[i];
                // C = 1.0 * contract(A, B) + 0.0 * C via cuTENSOR.
                tf::cudaTask kernel =
                    capturer.on([&, c_plan, tensor_a, tensor_b,
                                 tensor_c](hipStream_t stream) {
                        hipComplex alpha;
                        alpha.x = 1.;
                        alpha.y = 0.;
                        hipComplex beta;
                        beta.x = 0.;
                        beta.y = 0.;
                        cutensorContraction(&c_plan.handle, &c_plan.plan,
                                            &alpha, tensor_a, tensor_b, &beta,
                                            tensor_c, tensor_c, c_plan.work,
                                            c_plan.work_size, stream);
                    });
                kernel_tasks.push_back(kernel);
                // A step depends on the steps that produced its intermediate
                // operands (kernel_tasks is indexed by step number).
                if (pnia.id >= num_leafs) {
                    kernel_tasks[pnia.id - num_leafs].precede(kernel);
                }
                if (pnib.id >= num_leafs) {
                    kernel_tasks[pnib.id - num_leafs].precede(kernel);
                }
                // copy data from gpu_data to host_data after the last step
                if (i == path.size() - 1) {
                    result.resize(tensors[pnic.id]->GetSize());
                    tf::cudaTask d2h = capturer.memcpy(
                        result.data(), tensors[pnic.id]->GetData(),
                        tensors[pnic.id]->GetSize() * sizeof(hipComplex));
                    kernel.precede(d2h);
                }
            }
        },
        device);
}
// Entry point: expects exactly three tensor-network JSON files. The first
// two are contracted on GPU 0 and GPU 1 via cudaFlow capture tasks, the
// third on the CPU with the task-based contractor — all concurrently
// within one taskflow run.
int main(int argc, char *argv[])
{
    if (argc != 4) {
        std::cout << "heterogeneous_contraction.cu <tensor network file 1 on GPU 0> "
                     "<tensor network file 2 on GPU 1> <tensor network file 3 on CPU>"
                  << std::endl;
        std::cout << "Contracts three circuits on two GPUs and one CPU"
                  << std::endl;
        // Fixed: previously execution fell through after printing the usage
        // message and dereferenced the missing argv entries below.
        return 1;
    }
    std::string file_name_0 = argv[1];
    std::string file_name_1 = argv[2];
    std::string file_name_2 = argv[3];
    /*
     * Load the first tensor network file; its tensors live on GPU 0.
     */
    TensorNetworkFile<CudaTensor<hipComplex, 0>> tensor_file_0;
    try {
        std::ifstream tn_data(file_name_0);
        std::string circuit_str{std::istreambuf_iterator<char>(tn_data),
                                std::istreambuf_iterator<char>()};
        // Load data into TensorNetwork and PathInfo objects
        // (NOTE(review): second argument presumably requests path parsing,
        // since .path.value() is read below — confirm against serializer).
        TensorNetworkSerializer<CudaTensor<hipComplex, 0>> serializer;
        tensor_file_0 = serializer(circuit_str, true);
    }
    catch (...) {
        std::cerr << "Please specify a valid first JSON file to contract"
                  << std::endl;
        exit(1);
    }
    TensorNetwork<CudaTensor<hipComplex, 0>> tn_0 = tensor_file_0.tensors;
    PathInfo path_0 = tensor_file_0.path.value();
    /**
     * Load the second tensor network file; its tensors live on GPU 1.
     */
    TensorNetworkFile<CudaTensor<hipComplex, 1>> tensor_file_1;
    try {
        std::ifstream tn_data(file_name_1);
        std::string circuit_str{std::istreambuf_iterator<char>(tn_data),
                                std::istreambuf_iterator<char>()};
        // Load data into TensorNetwork and PathInfo objects
        TensorNetworkSerializer<CudaTensor<hipComplex, 1>> serializer;
        tensor_file_1 = serializer(circuit_str, true);
    }
    catch (...) {
        std::cerr << "Please specify a valid second JSON file to contract"
                  << std::endl;
        exit(1);
    }
    TensorNetwork<CudaTensor<hipComplex, 1>> tn_1 = tensor_file_1.tensors;
    PathInfo path_1 = tensor_file_1.path.value();
    /**
     * Load the third tensor network file; it is contracted on the CPU.
     */
    TensorNetworkFile<Tensor<std::complex<float>>> tensor_file_2;
    try {
        std::ifstream tn_data(file_name_2);
        std::string circuit_str{std::istreambuf_iterator<char>(tn_data),
                                std::istreambuf_iterator<char>()};
        // Load data into TensorNetwork and PathInfo objects
        TensorNetworkSerializer<Tensor<std::complex<float>>> serializer;
        tensor_file_2 = serializer(circuit_str, true);
    }
    catch (...) {
        std::cerr << "Please specify a valid JSON file to contract"
                  << std::endl;
        exit(1);
    }
    TensorNetwork<Tensor<std::complex<float>>> tn_2 =
        tensor_file_2.tensors;
    PathInfo path_2 = tensor_file_2.path.value();
    tf::Taskflow taskflow;
    /* Set up the GPU 0 contraction task. */
    CudaflowContractionTask<hipComplex, 0> gpu_task_0;
    AddCudaContractionToTaskflow<hipComplex, 0>(tn_0, path_0, taskflow,
                                                gpu_task_0);
    /* Set up the GPU 1 contraction task. */
    CudaflowContractionTask<hipComplex, 1> gpu_task_1;
    AddCudaContractionToTaskflow<hipComplex, 1>(tn_1, path_1, taskflow,
                                                gpu_task_1);
    /* Set up the CPU contraction and merge the GPU task graph into it. */
    TaskBasedContractor<Tensor<std::complex<float>>> contractor;
    contractor.AddContractionTasks(tn_2, path_2);
    contractor.AddTaskflow(taskflow);
    /* Contract on all devices. */
    contractor.Contract().wait();
    /* Display results. */
    auto result0 = gpu_task_0.result;
    std::cout << "GPU 0 result = " << result0[0].x << " " << result0[0].y
              << std::endl;
    auto result1 = gpu_task_1.result;
    std::cout << "GPU 1 result = " << result1[0].x << " " << result1[0].y
              << std::endl;
    auto result2 = contractor.GetResults()[0];
    std::cout << "CPU result = " << result2 << std::endl;
    return 0;
}
| dfedae851863d804019c3ed7cc2c71d0a1014266.cu | /**
* @file heterogeneous_contraction.cu
*
* @brief Contracts three tensor network files on two gpus
* and one cpu simultaneously
*
*/
#include <iostream>
#include "CudaTensor.hpp"
#include "PathInfo.hpp"
#include "TaskBasedContractor.hpp"
#include "Tensor.hpp"
#include "TensorNetwork.hpp"
#include "TensorNetworkIO.hpp"
#include <cuComplex.h>
#include <taskflow/cudaflow.hpp>
using namespace Jet;
// Per-device state for a cudaFlow-based tensor-network contraction: owns
// the leaf/intermediate tensors, the per-step cuTENSOR contraction plans,
// the captured kernel tasks (used to wire inter-step dependencies), and
// the final tensor copied back to the host. Must outlive the taskflow run.
template <typename T, int device = 0> struct CudaflowContractionTask {
    std::vector<std::unique_ptr<CudaTensor<T, device>>> tensors;  // indexed by path node id (leaves first)
    std::vector<typename CudaTensor<T, device>::CudaContractionPlan> plans;  // one per contraction step
    std::vector<tf::cudaTask> kernel_tasks;  // capture-time tasks, index = step
    std::vector<T> result;                   // final tensor after the D2H copy
};
/**
 * Records the full contraction of one tensor network as a single
 * cudaFlow-capturer task on @p device inside @p taskflow.
 *
 * Setup phase (runs immediately): for every pairwise step in the path,
 * materialise the two inputs (copy leaf tensors from the network, allocate
 * empty intermediates) plus the output tensor, and precompute a cuTENSOR
 * contraction plan. Capture phase (runs when the taskflow executes):
 * enqueue one cutensorContraction per step, chained after the kernels that
 * produce its intermediate inputs, and a final device-to-host copy of the
 * last output into gpu_task.result.
 *
 * NOTE(review): although templated on T, the body hard-codes
 * CudaTensor<cuComplex, device> and cuComplex alpha/beta, so it is only
 * correct for T == cuComplex — confirm before instantiating otherwise.
 *
 * @param tn        Tensor network providing the leaf tensors.
 * @param path_info Contraction path (steps, shapes, index lists).
 * @param taskflow  Taskflow that receives the captured GPU task.
 * @param gpu_task  Receives tensors, plans, kernel tasks and the result;
 *                  must outlive the taskflow execution (captured by ref).
 */
template <typename T, int device = 0>
void AddCudaContractionToTaskflow(
    const TensorNetwork<CudaTensor<T, device>> &tn,
    const PathInfo &path_info, tf::Taskflow &taskflow,
    CudaflowContractionTask<T, device> &gpu_task)
{
    auto &tensors = gpu_task.tensors;
    auto &plans = gpu_task.plans;
    auto &result = gpu_task.result;
    auto &kernel_tasks = gpu_task.kernel_tasks;
    const auto &path_node_info = path_info.GetSteps();
    const auto &path = path_info.GetPath();
    const auto &nodes = tn.GetNodes();
    // Node ids >= num_leafs denote intermediates produced by earlier steps.
    size_t num_leafs = nodes.size();
    tensors.resize(path_node_info.size());
    plans.resize(path.size());
    for (size_t i = 0; i < path.size(); i++) {
        const PathStepInfo &pnia = path_node_info[path[i].first];   // first input
        const PathStepInfo &pnib = path_node_info[path[i].second];  // second input
        const PathStepInfo &pnic = path_node_info[num_leafs + i];   // output
        // First input: allocate an empty intermediate, or copy the leaf tensor.
        if (pnia.id >= num_leafs) {
            tensors[path[i].first] =
                std::make_unique<CudaTensor<cuComplex, device>>(
                    CudaTensor<cuComplex, device>(pnia.tensor_indices,
                                                  pnia.shape));
        }
        else {
            tensors[path[i].first] =
                std::make_unique<CudaTensor<cuComplex, device>>(
                    CudaTensor<cuComplex, device>(
                        tn.GetNodes()[pnia.id].tensor));
        }
        // Second input: same leaf-vs-intermediate handling.
        if (pnib.id >= num_leafs) {
            tensors[path[i].second] =
                std::make_unique<CudaTensor<cuComplex, device>>(
                    CudaTensor<cuComplex, device>(pnib.tensor_indices,
                                                  pnib.shape));
        }
        else {
            tensors[path[i].second] =
                std::make_unique<CudaTensor<cuComplex, device>>(
                    CudaTensor<cuComplex, device>(
                        tn.GetNodes()[pnib.id].tensor));
        }
        // Output tensor for this step.
        tensors[num_leafs + i] =
            std::make_unique<CudaTensor<cuComplex, device>>(
                CudaTensor<cuComplex, device>(pnic.tensor_indices, pnic.shape));
        // Precompute the cuTENSOR plan so the capture lambda only launches.
        CudaTensor<cuComplex, device>::GetCudaContractionPlan(
            plans[i], *tensors[path[i].first], *tensors[path[i].second],
            *tensors[num_leafs + i]);
    }
    // The path/step metadata is captured by value (copies), the tensors/plans
    // by reference into gpu_task — hence the lifetime requirement above.
    // (The returned tf::Task handle is intentionally unused.)
    tf::Task task = taskflow.emplace_on(
        [&,path,path_node_info,num_leafs](tf::cudaFlowCapturer &capturer) {
            // NOTE(review): signed i vs size_t path.size() — fine for
            // realistic path lengths, but a sign-compare warning magnet.
            for (int i = 0; i < path.size(); i++) {
                const PathStepInfo &pnia = path_node_info[path[i].first];
                const PathStepInfo &pnib = path_node_info[path[i].second];
                const PathStepInfo &pnic = path_node_info[num_leafs + i];
                auto tensor_a = tensors[path[i].first]->GetData();
                auto tensor_b = tensors[path[i].second]->GetData();
                auto tensor_c = plans[i], tensor_c_ignore_ = tensors[num_leafs + i]->GetData();
                auto &c_plan = plans[i];
                // Capture C = 1*A*B + 0*C on the stream cudaFlow provides.
                tf::cudaTask kernel =
                    capturer.on([&, c_plan, tensor_a, tensor_b,
                                 tensor_c](cudaStream_t stream) {
                        cuComplex alpha;
                        alpha.x = 1.;
                        alpha.y = 0.;
                        cuComplex beta;
                        beta.x = 0.;
                        beta.y = 0.;
                        cutensorContraction(&c_plan.handle, &c_plan.plan,
                                            &alpha, tensor_a, tensor_b, &beta,
                                            tensor_c, tensor_c, c_plan.work,
                                            c_plan.work_size, stream);
                    });
                kernel_tasks.push_back(kernel);
                // Chain after the producer kernels of any intermediate input.
                if (pnia.id >= num_leafs) {
                    kernel_tasks[pnia.id - num_leafs].precede(kernel);
                }
                if (pnib.id >= num_leafs) {
                    kernel_tasks[pnib.id - num_leafs].precede(kernel);
                }
                // copy data from gpu_data to host_data
                if (i == path.size() - 1) {
                    result.resize(tensors[pnic.id]->GetSize());
                    tf::cudaTask d2h = capturer.memcpy(
                        result.data(), tensors[pnic.id]->GetData(),
                        tensors[pnic.id]->GetSize() * sizeof(cuComplex));
                    kernel.precede(d2h);
                }
            }
        },
        device);
}
/**
 * Entry point: loads three serialized tensor networks and contracts them
 * simultaneously — the first on GPU 0, the second on GPU 1, the third on
 * the CPU via TaskBasedContractor — then prints the three scalar results.
 *
 * Usage: heterogeneous_contraction <file for GPU 0> <file for GPU 1> <file for CPU>
 *
 * Returns 0 on success, 1 on bad arguments or unreadable input files.
 */
int main(int argc, char *argv[])
{
    if (argc != 4) {
        std::cout << "heterogeneous_contraction.cu <tensor network file 1 on GPU 0> "
                     "<tensor network file 2 on GPU 1> <tensor network file 3 on CPU>"
                  << std::endl;
        std::cout << "Contracts three circuits on two GPUs and one CPU"
                  << std::endl;
        // BUG FIX: previously fell through and dereferenced argv[1..3],
        // which is undefined behavior when fewer arguments are given.
        return 1;
    }
    std::string file_name_0 = argv[1];
    std::string file_name_1 = argv[2];
    std::string file_name_2 = argv[3];
    /*
     * Load first tensor network file onto GPU 0
     */
    TensorNetworkFile<CudaTensor<cuComplex, 0>> tensor_file_0;
    try {
        std::ifstream tn_data(file_name_0);
        std::string circuit_str{std::istreambuf_iterator<char>(tn_data),
                                std::istreambuf_iterator<char>()};
        // Load data into TensorNetwork and PathInfo objects
        TensorNetworkSerializer<CudaTensor<cuComplex, 0>> serializer;
        tensor_file_0 = serializer(circuit_str, true);
    }
    catch (...) {
        std::cerr << "Please specify a valid first JSON file to contract"
                  << std::endl;
        exit(1);
    }
    TensorNetwork<CudaTensor<cuComplex, 0>> tn_0 = tensor_file_0.tensors;
    PathInfo path_0 = tensor_file_0.path.value();
    /**
     * Load second tensor network file onto GPU 1
     */
    TensorNetworkFile<CudaTensor<cuComplex, 1>> tensor_file_1;
    try {
        std::ifstream tn_data(file_name_1);
        std::string circuit_str{std::istreambuf_iterator<char>(tn_data),
                                std::istreambuf_iterator<char>()};
        // Load data into TensorNetwork and PathInfo objects
        TensorNetworkSerializer<CudaTensor<cuComplex, 1>> serializer;
        tensor_file_1 = serializer(circuit_str, true);
    }
    catch (...) {
        std::cerr << "Please specify a valid second JSON file to contract"
                  << std::endl;
        exit(1);
    }
    TensorNetwork<CudaTensor<cuComplex, 1>> tn_1 = tensor_file_1.tensors;
    PathInfo path_1 = tensor_file_1.path.value();
    /**
     * Load third tensor network file onto CPU
     */
    TensorNetworkFile<Tensor<std::complex<float>>> tensor_file_2;
    try {
        std::ifstream tn_data(file_name_2);
        std::string circuit_str{std::istreambuf_iterator<char>(tn_data),
                                std::istreambuf_iterator<char>()};
        // Load data into TensorNetwork and PathInfo objects
        TensorNetworkSerializer<Tensor<std::complex<float>>> serializer;
        tensor_file_2 = serializer(circuit_str, true);
    }
    catch (...) {
        std::cerr << "Please specify a valid JSON file to contract"
                  << std::endl;
        exit(1);
    }
    TensorNetwork<Tensor<std::complex<float>>> tn_2 =
        tensor_file_2.tensors;
    PathInfo path_2 = tensor_file_2.path.value();
    tf::Taskflow taskflow;
    /* set up gpu 0 contraction task */
    CudaflowContractionTask<cuComplex, 0> gpu_task_0;
    AddCudaContractionToTaskflow<cuComplex, 0>(tn_0, path_0, taskflow,
                                               gpu_task_0);
    /* set up gpu 1 contraction task */
    CudaflowContractionTask<cuComplex, 1> gpu_task_1;
    AddCudaContractionToTaskflow<cuComplex, 1>(tn_1, path_1, taskflow,
                                               gpu_task_1);
    /* set up cpu contraction task */
    TaskBasedContractor<Tensor<std::complex<float>>> contractor;
    contractor.AddContractionTasks(tn_2, path_2);
    // Add gpu task graph to cpu task graph
    contractor.AddTaskflow(taskflow);
    /* Contract on all devices; wait() blocks until every device finishes,
     * so the result reads below are safe. */
    contractor.Contract().wait();
    /* Display results */
    auto result0 = gpu_task_0.result;
    std::cout << "GPU 0 result = " << result0[0].x << " " << result0[0].y
              << std::endl;
    auto result1 = gpu_task_1.result;
    std::cout << "GPU 1 result = " << result1[0].x << " " << result1[0].y
              << std::endl;
    auto result2 = contractor.GetResults()[0];
    std::cout << "CPU result = " << result2 << std::endl;
    return 0;
}
|
e98ace5eaab16f4e4a3d6e2f48cab8917e99f170.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "my_select.h"
#include <stdio.h>
/**
 * One parallel median-split step of a tree build (looks like a
 * vantage-point-style construction — TODO confirm against my_select.h).
 *
 * Each thread handles one point of one subarray. Every surviving point
 * counts how many points in its subarray have distance <= its own; the
 * single thread whose count equals low_num holds the median and then
 * partitions its subarray in place into a lower and an upper half,
 * reassigning heads and subarray lengths for the next iteration.
 *
 * NOTE(review): X is an unused parameter. The median-selection test
 * (lower != low_num) assumes all distances within a subarray are distinct;
 * exact ties could leave zero or multiple threads passing — confirm the
 * caller guarantees distinctness (the +1e-8 nudge below suggests so).
 *
 * @param X             unused
 * @param indexes       point indices, rewritten in partitioned order
 * @param copy_indexes  snapshot of indexes from the previous iteration (read-only here)
 * @param heads         per-point head position; negative marks a head (skipped)
 * @param distances     per-point distance to its head; zeroed after use
 * @param array_n       per-point length of the subarray it belongs to
 * @param n_all         total number of points (grid bound)
 */
__global__ void my_sort(double *X,int *indexes,int *copy_indexes,int *heads,double *distances, int *array_n,int n_all){
    int idx = threadIdx.x +blockDim.x*blockIdx.x;
    if(idx >= n_all )
        return; //exit thread: out of bounds
    int my_index = copy_indexes[idx]; // this thread's point index
    int my_head = heads[my_index]; // position of this point's head
    if (my_head < 0 )
        return; //exit thread: this point is itself a head
    int index_my_head = indexes[my_head]; // point index of the head
    int n = array_n[my_index]; // length of this point's subarray (incl. head)
    double my_dist = distances[my_index]; // distance of this point from its head
    int midle = (int)(n-1)/2; // middle position of the subarray
    int low_num = n - 1 - midle; // number of values in the lower half (head excluded)
    int lower = 0; // how many points are <= me
    // Count points in my subarray with distance <= mine; bail out early once
    // the count exceeds low_num + 1 (I can no longer be the median), and
    // return if we walk past the subarray into a different head's points.
    for(int i=my_head+1 ; (i< n + my_head) && ( lower <= low_num +1 ); i++){
        if (my_head != heads[copy_indexes[i]])
            return;
        double temp = distances[copy_indexes[i]];
        if (temp <= my_dist) lower++;
    }
    if( lower != low_num ) // only the median thread proceeds
        return;
    // From here on, exactly one thread per subarray: partition in place.
    double median = my_dist; // my distance is the median
    int id_lower= my_head + 1; // write cursor for the lower half
    int id_biger= my_head + 1 + low_num; // write cursor for the upper half
    int high_num = n - low_num - 1; // size of the upper half
    for(int i=my_head+1 ; i<n + my_head; i++) // sweep the whole subarray
    {
        int idx_i = copy_indexes[i];
        if(distances[idx_i] <= median){ // goes to the lower half
            indexes[(id_lower++)] = idx_i;
            heads[idx_i] = my_head + 1;
            array_n[idx_i] = low_num;
        }else{ // goes to the upper half
            indexes[(id_biger++)] = idx_i;
            heads[idx_i] = my_head + 1 + low_num;
            array_n[idx_i] = high_num;
        }
        distances[idx_i] = 0; // reset for the next level
    }
    // Store the split radius on the old head (epsilon keeps <= comparisons
    // consistent on the next level), then label the two new heads.
    distances[indexes[my_head]] = median + 1e-8;
    heads[indexes[my_head + 1]] = heads[index_my_head]*2 ;
    if(high_num != 0 )
        heads[indexes[my_head + low_num + 1]] = heads[index_my_head]*2 -1 ;
}
| e98ace5eaab16f4e4a3d6e2f48cab8917e99f170.cu | #include "my_select.h"
#include <stdio.h>
/**
 * One parallel median-split step of a tree build (looks like a
 * vantage-point-style construction — TODO confirm against my_select.h).
 *
 * Each thread handles one point of one subarray. Every surviving point
 * counts how many points in its subarray have distance <= its own; the
 * single thread whose count equals low_num holds the median and then
 * partitions its subarray in place into a lower and an upper half,
 * reassigning heads and subarray lengths for the next iteration.
 *
 * NOTE(review): X is an unused parameter. The median-selection test
 * (lower != low_num) assumes all distances within a subarray are distinct;
 * exact ties could leave zero or multiple threads passing — confirm the
 * caller guarantees distinctness (the +1e-8 nudge below suggests so).
 *
 * @param X             unused
 * @param indexes       point indices, rewritten in partitioned order
 * @param copy_indexes  snapshot of indexes from the previous iteration (read-only here)
 * @param heads         per-point head position; negative marks a head (skipped)
 * @param distances     per-point distance to its head; zeroed after use
 * @param array_n       per-point length of the subarray it belongs to
 * @param n_all         total number of points (grid bound)
 */
__global__ void my_sort(double *X,int *indexes,int *copy_indexes,int *heads,double *distances, int *array_n,int n_all){
    int idx = threadIdx.x +blockDim.x*blockIdx.x;
    if(idx >= n_all )
        return; //exit thread: out of bounds
    int my_index = copy_indexes[idx]; // this thread's point index
    int my_head = heads[my_index]; // position of this point's head
    if (my_head < 0 )
        return; //exit thread: this point is itself a head
    int index_my_head = indexes[my_head]; // point index of the head
    int n = array_n[my_index]; // length of this point's subarray (incl. head)
    double my_dist = distances[my_index]; // distance of this point from its head
    int midle = (int)(n-1)/2; // middle position of the subarray
    int low_num = n - 1 - midle; // number of values in the lower half (head excluded)
    int lower = 0; // how many points are <= me
    // Count points in my subarray with distance <= mine; bail out early once
    // the count exceeds low_num + 1 (I can no longer be the median), and
    // return if we walk past the subarray into a different head's points.
    for(int i=my_head+1 ; (i< n + my_head) && ( lower <= low_num +1 ); i++){
        if (my_head != heads[copy_indexes[i]])
            return;
        double temp = distances[copy_indexes[i]];
        if (temp <= my_dist) lower++;
    }
    if( lower != low_num ) // only the median thread proceeds
        return;
    // From here on, exactly one thread per subarray: partition in place.
    double median = my_dist; // my distance is the median
    int id_lower= my_head + 1; // write cursor for the lower half
    int id_biger= my_head + 1 + low_num; // write cursor for the upper half
    int high_num = n - low_num - 1; // size of the upper half
    for(int i=my_head+1 ; i<n + my_head; i++) // sweep the whole subarray
    {
        int idx_i = copy_indexes[i];
        if(distances[idx_i] <= median){ // goes to the lower half
            indexes[(id_lower++)] = idx_i;
            heads[idx_i] = my_head + 1;
            array_n[idx_i] = low_num;
        }else{ // goes to the upper half
            indexes[(id_biger++)] = idx_i;
            heads[idx_i] = my_head + 1 + low_num;
            array_n[idx_i] = high_num;
        }
        distances[idx_i] = 0; // reset for the next level
    }
    // Store the split radius on the old head (epsilon keeps <= comparisons
    // consistent on the next level), then label the two new heads.
    distances[indexes[my_head]] = median + 1e-8;
    heads[indexes[my_head + 1]] = heads[index_my_head]*2 ;
    if(high_num != 0 )
        heads[indexes[my_head + low_num + 1]] = heads[index_my_head]*2 -1 ;
}
|
7973fadfaeaad135e9951db583260fa0d6593b99.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <map>
#include <string>
#include <vector>
#include "caffe/layers/predict_box_layer.hpp"
namespace caffe {
/**
 * One thread per (image n, row h, col w) cell: if the positive-class score
 * at that cell exceeds positive_thresh_ (and, when stitching, the stitch
 * cell is valid), emit a 5-channel box (x, y, w, h, score) into bb_data and
 * set counter_ to 1; otherwise zero both.
 *
 * Layouts (all NCHW): score_data has 2 channels (bg, fg); bb_data has 5;
 * bbr_data has 4 regression channels; counter_ has 1.
 *
 * NOTE(review): stitch_data is indexed with n*2+{0,1,2} — three channels
 * read with a stride-2 image offset, so channel 2 of image n aliases
 * channel 0 of image n+1. Probably intended n*3+{0,1,2}; confirm the
 * stitch blob's channel count. The /Dtype(12) factor looks like a
 * training-patch-size constant — TODO confirm.
 */
template <typename Dtype>
__global__ void PredictBoxForward(const int num, const int spatial_dim, const int height, const int width,
    const Dtype* score_data, Dtype* bb_data, Dtype positive_thresh_,
    int stride_, int receptive_field_, Dtype* counter_,
    bool bounding_box_regression_, const Dtype* bbr_data, bool bounding_box_exp_,
    bool use_stitch_, const Dtype* stitch_data) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    // Decompose the flat index into (image, row, col).
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    int h = s / width;
    int w = s % width;
    // Keep cells whose foreground score is in (positive_thresh_, 1] and,
    // when stitching, whose stitch receptive field is non-zero (valid).
    if (score_data[((n * 2 + 1) * height + h) * width + w] > positive_thresh_ &&
        score_data[((n * 2 + 1) * height + h) * width + w] < 1 + 1e-6 &&
        !(use_stitch_ && stitch_data[((n * 2 + 2) * height + h) * width + w] == 0)) {
      // Stitch offsets translate stitched coordinates back to the source image.
      Dtype bias_x = use_stitch_ ? stitch_data[((n * 2 + 0) * height + h) * width + w] : 0;
      Dtype bias_y = use_stitch_ ? stitch_data[((n * 2 + 1) * height + h) * width + w] : 0;
      Dtype real_receptive_field = use_stitch_ ? stitch_data[((n * 2 + 2) * height + h) * width + w] : receptive_field_;
      // Base box: anchor at the cell's stride position, sized by the receptive field.
      bb_data[((n * 5 + 0) * height + h) * width + w] = (Dtype(w * stride_) - bias_x) / Dtype(12) * real_receptive_field;
      bb_data[((n * 5 + 1) * height + h) * width + w] = (Dtype(h * stride_) - bias_y) / Dtype(12) * real_receptive_field;
      bb_data[((n * 5 + 2) * height + h) * width + w] = real_receptive_field;
      bb_data[((n * 5 + 3) * height + h) * width + w] = real_receptive_field;
      bb_data[((n * 5 + 4) * height + h) * width + w] = score_data[((n * 2 + 1) * height + h) * width + w];
      if (bounding_box_regression_) {
        if (bounding_box_exp_) {
          // Exp parameterisation: channels are (dx, dy, log dw, log dh).
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 2) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 2) * height + h) * width + w]);
          bb_data[((n * 5 + 3) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 3) * height + h) * width + w]);
        }
        else {
          // Corner parameterisation: channels are (top, left, bottom, right) offsets.
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 2) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 3) * height + h) * width + w] - bbr_data[((n * 4 + 1) * height + h) * width + w]) * real_receptive_field;
          bb_data[((n * 5 + 3) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 2) * height + h) * width + w] - bbr_data[((n * 4 + 0) * height + h) * width + w]) * real_receptive_field;
        }
      }
      counter_[(n * height + h) * width + w] = 1;
    }
    else {
      // Below threshold (or invalid stitch cell): emit an empty box.
      bb_data[((n * 5 + 0) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 1) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 2) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 3) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 4) * height + h) * width + w] = 0;
      counter_[(n * height + h) * width + w] = 0;
    }
  }
}
/**
 * Same per-cell box prediction as PredictBoxForward, but additionally
 * suppresses non-maxima: a cell is kept only if its foreground score is at
 * least the corresponding value in nms_data (within 1e-6), which the caller
 * supplies as a pooled/maximum score map. No stitch support in this variant.
 *
 * Layouts (all NCHW): score_data and nms_data have 2 channels (bg, fg);
 * bb_data has 5 output channels (x, y, w, h, score); bbr_data has 4;
 * counter_ has 1 (set to 1 for kept cells, 0 otherwise).
 */
template <typename Dtype>
__global__ void PredictBoxForwardWithNMS(const int num, const int spatial_dim, const int height, const int width,
    const Dtype* score_data, Dtype* bb_data, Dtype positive_thresh_,
    int stride_, int receptive_field_, Dtype* counter_, const Dtype* nms_data,
    bool bounding_box_regression_, const Dtype* bbr_data, bool bounding_box_exp_) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    // Decompose the flat index into (image, row, col).
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    int h = s / width;
    int w = s % width;
    // Keep cells in (positive_thresh_, 1] that are local maxima per nms_data.
    if (score_data[((n * 2 + 1) * height + h) * width + w] > positive_thresh_ &&
        score_data[((n * 2 + 1) * height + h) * width + w] < 1 + 1e-6 &&
        score_data[((n * 2 + 1) * height + h) * width + w] > nms_data[((n * 2 + 1) * height + h) * width + w] - 1e-6) {
      // Base box anchored at the cell's stride position, sized by the receptive field.
      bb_data[((n * 5 + 0) * height + h) * width + w] = w * stride_;
      bb_data[((n * 5 + 1) * height + h) * width + w] = h * stride_;
      bb_data[((n * 5 + 2) * height + h) * width + w] = receptive_field_;
      bb_data[((n * 5 + 3) * height + h) * width + w] = receptive_field_;
      bb_data[((n * 5 + 4) * height + h) * width + w] = score_data[((n * 2 + 1) * height + h) * width + w];
      if (bounding_box_regression_) {
        if (bounding_box_exp_) {
          // Exp parameterisation: channels are (dx, dy, log dw, log dh).
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 2) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 2) * height + h) * width + w]);
          bb_data[((n * 5 + 3) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 3) * height + h) * width + w]);
        }
        else {
          // Corner parameterisation: channels are (top, left, bottom, right) offsets.
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 2) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 3) * height + h) * width + w] - bbr_data[((n * 4 + 1) * height + h) * width + w]) * receptive_field_;
          bb_data[((n * 5 + 3) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 2) * height + h) * width + w] - bbr_data[((n * 4 + 0) * height + h) * width + w]) * receptive_field_;
        }
      }
      counter_[(n * height + h) * width + w] = 1;
    }
    else {
      // Suppressed or below threshold: emit an empty box.
      bb_data[((n * 5 + 0) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 1) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 2) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 3) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 4) * height + h) * width + w] = 0;
      counter_[(n * height + h) * width + w] = 0;
    }
  }
}
/**
 * GPU forward pass: launches the box-prediction kernel (NMS variant when
 * nms_ is set) to fill top[0] with dense 5-channel boxes and counter_ with
 * a keep mask, then — when output_vector_ — gathers the surviving boxes
 * into top[1] on the host as an (num, count, 5) blob.
 *
 * Bottom blobs: [0] 2-channel score map; [1] 4-channel bbox regression
 * (only read when bounding_box_regression_); [2] NMS score map when nms_,
 * or stitch map when use_stitch_.
 *
 * NOTE(review): the gather loop only runs for num == 1; for larger batches
 * with detections top[1] stays zero-filled at shape (num, 1, 5) — confirm
 * intended. score_data_cpu is never read; it may exist only to force a
 * device-to-host sync via cpu_data() — confirm before removing. No
 * CUDA_POST_KERNEL_CHECK after the launches.
 */
template <typename Dtype>
void PredictBoxLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
    const Dtype* score_data = bottom[0]->gpu_data();
    Dtype* bb_data = top[0]->mutable_gpu_data();
    const Dtype* bbr_data = NULL;
    if (bounding_box_regression_) bbr_data = bottom[1]->gpu_data();
    int num = bottom[0]->num();
    int output_height = bottom[0]->height();
    int output_width = bottom[0]->width();
    int spatial_dim = output_height * output_width;
    Dtype count = Dtype(0.0);
    if (nms_) {
        // bottom[2] carries the pooled score map used for non-max suppression.
        PredictBoxForwardWithNMS<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
            CAFFE_CUDA_NUM_THREADS >> > (num, spatial_dim, output_height, output_width,
            score_data, bb_data, positive_thresh_,
            stride_, receptive_field_, counter_.mutable_gpu_data(), bottom[2]->gpu_data(),
            bounding_box_regression_, bbr_data, bounding_box_exp_);
    }
    else {
        // bottom[2] carries the stitch map (only dereferenced when use_stitch_).
        PredictBoxForward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
            CAFFE_CUDA_NUM_THREADS >> > (num, spatial_dim, output_height, output_width,
            score_data, bb_data, positive_thresh_,
            stride_, receptive_field_, counter_.mutable_gpu_data(),
            bounding_box_regression_, bbr_data, bounding_box_exp_,
            use_stitch_, use_stitch_ ? bottom[2]->gpu_data() : NULL);
    }
    if (output_vector_) {
        // Sum the keep mask to learn how many boxes survived (syncs the GPU).
        caffe_gpu_asum(num*spatial_dim, counter_.gpu_data(), &count);
        const Dtype* score_data_cpu = bottom[0]->cpu_data();
        const Dtype* bb_data_cpu = top[0]->cpu_data();
        if (num == 1 && count > 0) {
#if __cplusplus < 201103L
            int arr[] = { bottom[0]->num(), (int)count, 5 };
            vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
            top[1]->Reshape(shape);
#else
            top[1]->Reshape({ bottom[0]->num(), (int)count, 5 });
#endif
            int i = 0;
            // Column-major scan: copy each kept box's 5 values into top[1].
            for (int x = 0; x < output_width; x++) {
                for (int y = 0; y < output_height; y++) {
                    if (bb_data_cpu[(4 * output_height + y) * output_width + x] > positive_thresh_) {
                        top[1]->mutable_cpu_data()[i * 5 + 0] = bb_data_cpu[(0 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 1] = bb_data_cpu[(1 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 2] = bb_data_cpu[(2 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 3] = bb_data_cpu[(3 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 4] = bb_data_cpu[(4 * output_height + y) * output_width + x];
                        i++;
                    }
                }
            }
        }
        else {
            // No detections (or batched input): emit a zeroed (num, 1, 5) blob.
#if __cplusplus < 201103L
            int arr[] = { bottom[0]->num(), 1, 5 };
            vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
            top[1]->Reshape(shape);
#else
            top[1]->Reshape({ bottom[0]->num(), 1, 5 });
#endif
            caffe_gpu_set<Dtype>(top[1]->count(), 0, top[1]->mutable_gpu_data());
        }
    }
}
// PredictBoxLayer is inference-only: no gradient is defined, so the GPU
// backward pass deliberately traps via Caffe's NOT_IMPLEMENTED macro.
template <typename Dtype>
void PredictBoxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    NOT_IMPLEMENTED;
}
INSTANTIATE_LAYER_GPU_FUNCS(PredictBoxLayer);
} // namespace caffe
| 7973fadfaeaad135e9951db583260fa0d6593b99.cu | #include <algorithm>
#include <map>
#include <string>
#include <vector>
#include "caffe/layers/predict_box_layer.hpp"
namespace caffe {
/**
 * One thread per (image n, row h, col w) cell: if the positive-class score
 * at that cell exceeds positive_thresh_ (and, when stitching, the stitch
 * cell is valid), emit a 5-channel box (x, y, w, h, score) into bb_data and
 * set counter_ to 1; otherwise zero both.
 *
 * Layouts (all NCHW): score_data has 2 channels (bg, fg); bb_data has 5;
 * bbr_data has 4 regression channels; counter_ has 1.
 *
 * NOTE(review): stitch_data is indexed with n*2+{0,1,2} — three channels
 * read with a stride-2 image offset, so channel 2 of image n aliases
 * channel 0 of image n+1. Probably intended n*3+{0,1,2}; confirm the
 * stitch blob's channel count. The /Dtype(12) factor looks like a
 * training-patch-size constant — TODO confirm.
 */
template <typename Dtype>
__global__ void PredictBoxForward(const int num, const int spatial_dim, const int height, const int width,
    const Dtype* score_data, Dtype* bb_data, Dtype positive_thresh_,
    int stride_, int receptive_field_, Dtype* counter_,
    bool bounding_box_regression_, const Dtype* bbr_data, bool bounding_box_exp_,
    bool use_stitch_, const Dtype* stitch_data) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    // Decompose the flat index into (image, row, col).
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    int h = s / width;
    int w = s % width;
    // Keep cells whose foreground score is in (positive_thresh_, 1] and,
    // when stitching, whose stitch receptive field is non-zero (valid).
    if (score_data[((n * 2 + 1) * height + h) * width + w] > positive_thresh_ &&
        score_data[((n * 2 + 1) * height + h) * width + w] < 1 + 1e-6 &&
        !(use_stitch_ && stitch_data[((n * 2 + 2) * height + h) * width + w] == 0)) {
      // Stitch offsets translate stitched coordinates back to the source image.
      Dtype bias_x = use_stitch_ ? stitch_data[((n * 2 + 0) * height + h) * width + w] : 0;
      Dtype bias_y = use_stitch_ ? stitch_data[((n * 2 + 1) * height + h) * width + w] : 0;
      Dtype real_receptive_field = use_stitch_ ? stitch_data[((n * 2 + 2) * height + h) * width + w] : receptive_field_;
      // Base box: anchor at the cell's stride position, sized by the receptive field.
      bb_data[((n * 5 + 0) * height + h) * width + w] = (Dtype(w * stride_) - bias_x) / Dtype(12) * real_receptive_field;
      bb_data[((n * 5 + 1) * height + h) * width + w] = (Dtype(h * stride_) - bias_y) / Dtype(12) * real_receptive_field;
      bb_data[((n * 5 + 2) * height + h) * width + w] = real_receptive_field;
      bb_data[((n * 5 + 3) * height + h) * width + w] = real_receptive_field;
      bb_data[((n * 5 + 4) * height + h) * width + w] = score_data[((n * 2 + 1) * height + h) * width + w];
      if (bounding_box_regression_) {
        if (bounding_box_exp_) {
          // Exp parameterisation: channels are (dx, dy, log dw, log dh).
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 2) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 2) * height + h) * width + w]);
          bb_data[((n * 5 + 3) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 3) * height + h) * width + w]);
        }
        else {
          // Corner parameterisation: channels are (top, left, bottom, right) offsets.
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * real_receptive_field;
          bb_data[((n * 5 + 2) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 3) * height + h) * width + w] - bbr_data[((n * 4 + 1) * height + h) * width + w]) * real_receptive_field;
          bb_data[((n * 5 + 3) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 2) * height + h) * width + w] - bbr_data[((n * 4 + 0) * height + h) * width + w]) * real_receptive_field;
        }
      }
      counter_[(n * height + h) * width + w] = 1;
    }
    else {
      // Below threshold (or invalid stitch cell): emit an empty box.
      bb_data[((n * 5 + 0) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 1) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 2) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 3) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 4) * height + h) * width + w] = 0;
      counter_[(n * height + h) * width + w] = 0;
    }
  }
}
/**
 * Same per-cell box prediction as PredictBoxForward, but additionally
 * suppresses non-maxima: a cell is kept only if its foreground score is at
 * least the corresponding value in nms_data (within 1e-6), which the caller
 * supplies as a pooled/maximum score map. No stitch support in this variant.
 *
 * Layouts (all NCHW): score_data and nms_data have 2 channels (bg, fg);
 * bb_data has 5 output channels (x, y, w, h, score); bbr_data has 4;
 * counter_ has 1 (set to 1 for kept cells, 0 otherwise).
 */
template <typename Dtype>
__global__ void PredictBoxForwardWithNMS(const int num, const int spatial_dim, const int height, const int width,
    const Dtype* score_data, Dtype* bb_data, Dtype positive_thresh_,
    int stride_, int receptive_field_, Dtype* counter_, const Dtype* nms_data,
    bool bounding_box_regression_, const Dtype* bbr_data, bool bounding_box_exp_) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    // Decompose the flat index into (image, row, col).
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    int h = s / width;
    int w = s % width;
    // Keep cells in (positive_thresh_, 1] that are local maxima per nms_data.
    if (score_data[((n * 2 + 1) * height + h) * width + w] > positive_thresh_ &&
        score_data[((n * 2 + 1) * height + h) * width + w] < 1 + 1e-6 &&
        score_data[((n * 2 + 1) * height + h) * width + w] > nms_data[((n * 2 + 1) * height + h) * width + w] - 1e-6) {
      // Base box anchored at the cell's stride position, sized by the receptive field.
      bb_data[((n * 5 + 0) * height + h) * width + w] = w * stride_;
      bb_data[((n * 5 + 1) * height + h) * width + w] = h * stride_;
      bb_data[((n * 5 + 2) * height + h) * width + w] = receptive_field_;
      bb_data[((n * 5 + 3) * height + h) * width + w] = receptive_field_;
      bb_data[((n * 5 + 4) * height + h) * width + w] = score_data[((n * 2 + 1) * height + h) * width + w];
      if (bounding_box_regression_) {
        if (bounding_box_exp_) {
          // Exp parameterisation: channels are (dx, dy, log dw, log dh).
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 2) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 2) * height + h) * width + w]);
          bb_data[((n * 5 + 3) * height + h) * width + w] *= exp(bbr_data[((n * 4 + 3) * height + h) * width + w]);
        }
        else {
          // Corner parameterisation: channels are (top, left, bottom, right) offsets.
          bb_data[((n * 5 + 0) * height + h) * width + w] += bbr_data[((n * 4 + 1) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 1) * height + h) * width + w] += bbr_data[((n * 4 + 0) * height + h) * width + w] * receptive_field_;
          bb_data[((n * 5 + 2) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 3) * height + h) * width + w] - bbr_data[((n * 4 + 1) * height + h) * width + w]) * receptive_field_;
          bb_data[((n * 5 + 3) * height + h) * width + w] +=
              (bbr_data[((n * 4 + 2) * height + h) * width + w] - bbr_data[((n * 4 + 0) * height + h) * width + w]) * receptive_field_;
        }
      }
      counter_[(n * height + h) * width + w] = 1;
    }
    else {
      // Suppressed or below threshold: emit an empty box.
      bb_data[((n * 5 + 0) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 1) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 2) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 3) * height + h) * width + w] = 0;
      bb_data[((n * 5 + 4) * height + h) * width + w] = 0;
      counter_[(n * height + h) * width + w] = 0;
    }
  }
}
/**
 * GPU forward pass: launches the box-prediction kernel (NMS variant when
 * nms_ is set) to fill top[0] with dense 5-channel boxes and counter_ with
 * a keep mask, then — when output_vector_ — gathers the surviving boxes
 * into top[1] on the host as an (num, count, 5) blob.
 *
 * Bottom blobs: [0] 2-channel score map; [1] 4-channel bbox regression
 * (only read when bounding_box_regression_); [2] NMS score map when nms_,
 * or stitch map when use_stitch_.
 *
 * NOTE(review): the gather loop only runs for num == 1; for larger batches
 * with detections top[1] stays zero-filled at shape (num, 1, 5) — confirm
 * intended. score_data_cpu is never read; it may exist only to force a
 * device-to-host sync via cpu_data() — confirm before removing. No
 * CUDA_POST_KERNEL_CHECK after the launches.
 */
template <typename Dtype>
void PredictBoxLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
    const Dtype* score_data = bottom[0]->gpu_data();
    Dtype* bb_data = top[0]->mutable_gpu_data();
    const Dtype* bbr_data = NULL;
    if (bounding_box_regression_) bbr_data = bottom[1]->gpu_data();
    int num = bottom[0]->num();
    int output_height = bottom[0]->height();
    int output_width = bottom[0]->width();
    int spatial_dim = output_height * output_width;
    Dtype count = Dtype(0.0);
    if (nms_) {
        // bottom[2] carries the pooled score map used for non-max suppression.
        PredictBoxForwardWithNMS<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
            CAFFE_CUDA_NUM_THREADS >> > (num, spatial_dim, output_height, output_width,
            score_data, bb_data, positive_thresh_,
            stride_, receptive_field_, counter_.mutable_gpu_data(), bottom[2]->gpu_data(),
            bounding_box_regression_, bbr_data, bounding_box_exp_);
    }
    else {
        // bottom[2] carries the stitch map (only dereferenced when use_stitch_).
        PredictBoxForward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
            CAFFE_CUDA_NUM_THREADS >> > (num, spatial_dim, output_height, output_width,
            score_data, bb_data, positive_thresh_,
            stride_, receptive_field_, counter_.mutable_gpu_data(),
            bounding_box_regression_, bbr_data, bounding_box_exp_,
            use_stitch_, use_stitch_ ? bottom[2]->gpu_data() : NULL);
    }
    if (output_vector_) {
        // Sum the keep mask to learn how many boxes survived (syncs the GPU).
        caffe_gpu_asum(num*spatial_dim, counter_.gpu_data(), &count);
        const Dtype* score_data_cpu = bottom[0]->cpu_data();
        const Dtype* bb_data_cpu = top[0]->cpu_data();
        if (num == 1 && count > 0) {
#if __cplusplus < 201103L
            int arr[] = { bottom[0]->num(), (int)count, 5 };
            vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
            top[1]->Reshape(shape);
#else
            top[1]->Reshape({ bottom[0]->num(), (int)count, 5 });
#endif
            int i = 0;
            // Column-major scan: copy each kept box's 5 values into top[1].
            for (int x = 0; x < output_width; x++) {
                for (int y = 0; y < output_height; y++) {
                    if (bb_data_cpu[(4 * output_height + y) * output_width + x] > positive_thresh_) {
                        top[1]->mutable_cpu_data()[i * 5 + 0] = bb_data_cpu[(0 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 1] = bb_data_cpu[(1 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 2] = bb_data_cpu[(2 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 3] = bb_data_cpu[(3 * output_height + y) * output_width + x];
                        top[1]->mutable_cpu_data()[i * 5 + 4] = bb_data_cpu[(4 * output_height + y) * output_width + x];
                        i++;
                    }
                }
            }
        }
        else {
            // No detections (or batched input): emit a zeroed (num, 1, 5) blob.
#if __cplusplus < 201103L
            int arr[] = { bottom[0]->num(), 1, 5 };
            vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
            top[1]->Reshape(shape);
#else
            top[1]->Reshape({ bottom[0]->num(), 1, 5 });
#endif
            caffe_gpu_set<Dtype>(top[1]->count(), 0, top[1]->mutable_gpu_data());
        }
    }
}
// PredictBoxLayer is inference-only: no gradient is defined, so the GPU
// backward pass deliberately traps via Caffe's NOT_IMPLEMENTED macro.
template <typename Dtype>
void PredictBoxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    NOT_IMPLEMENTED;
}
INSTANTIATE_LAYER_GPU_FUNCS(PredictBoxLayer);
} // namespace caffe
|
4fae7c8242b256793c6972e18a4d96d336270815.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// DOBFS includes
#include <gunrock/app/dobfs/dobfs_enactor.cuh>
#include <gunrock/app/dobfs/dobfs_problem.cuh>
#include <gunrock/app/dobfs/dobfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
using namespace gunrock::app::dobfs;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
// Test-harness globals, set from the command line (see Usage()).
bool g_verbose;          // --v: print verbose per-iteration debug info
bool g_undirected;       // --undirected: treat the input graph as undirected
bool g_quick;            // --quick: skip the CPU reference validation
bool g_stream_from_host; // NOTE(review): not referenced in this chunk — confirm use
float g_alpha;           // DOBFS tuning parameter alpha — TODO confirm semantics
float g_beta;            // DOBFS tuning parameter beta — TODO confirm semantics
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
/**
 * Prints the command-line usage text for the DOBFS test driver to stdout.
 */
void Usage()
{
    // The text contains no conversion specifiers, so fputs emits it verbatim
    // (no trailing newline added, matching the original printf behavior).
    static const char usage_text[] =
        " test_dobfs <graph type> <graph type args> [--device=<device_index>]\n"
        " [--src=<source_index>] [--instrumented] [--idempotence=<0|1>] [--v]\n"
        " [--undirected] [--iteration-num=<num>] [--quick=<0|1>] [--mark-pred]\n"
        " [--queue-sizing=<scale factor>]\n"
        "\n"
        "Graph types and args:\n"
        " market <file>\n"
        " Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n"
        " edges from stdin (or from the optionally-specified file).\n"
        " --device=<device_index> Set GPU device for running the test. [Default: 0].\n"
        " --undirected Treat the graph as undirected (symmetric).\n"
        " --idempotence=<0 or 1> Enable: 1, Disable: 0 [Default: Enable].\n"
        " --instrumented Keep kernels statics [Default: Disable].\n"
        " total_queued, search_depth and barrier duty\n"
        " (a relative indicator of load imbalance.)\n"
        " --src=<source vertex id> Begins BFS from the source [Default: 0].\n"
        " If randomize: from a random source vertex.\n"
        " If largestdegree: from largest degree vertex.\n"
        " --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n"
        " --mark-pred Keep both label info and predecessor info.\n"
        " --queue-sizing=<factor> Allocates a frontier queue sized at: \n"
        " (graph-edges * <scale factor>). [Default: 1.0]\n"
        " --v Print verbose per iteration debug info.\n"
        " --iteration-num=<number> Number of runs to perform the test [Default: 1].\n";
    fputs(usage_text, stdout);
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @param[in] source_path Search depth from the source for each node.
* @param[in] preds Predecessor node id for each node.
* @param[in] nodes Number of nodes in the graph.
* @param[in] MARK_PREDECESSORS Whether to show predecessor of each node.
* @param[in] ENABLE_IDEMPOTENCE Whether to enable idempotence mode.
*/
// Prints the labels (and, when tracked, predecessors) of at most the first
// 40 vertices of the GPU result, as "[i:label,pred i:label,pred ... ]".
template<typename VertexId, typename SizeT>
void DisplaySolution (VertexId *source_path,
                      VertexId *preds,
                      SizeT nodes,
                      bool MARK_PREDECESSORS,
                      bool ENABLE_IDEMPOTENCE)
{
    // Predecessors are only available when marked and idempotence is off.
    const bool show_preds = MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE;
    // Cap the printout at 40 vertices.
    if (nodes > 40) {
        nodes = 40;
    }
    printf("\nFirst %d labels of the GPU result.\n", nodes);
    printf("[");
    for (VertexId v = 0; v < nodes; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(source_path[v]);
        if (show_preds) {
            printf(",");
            PrintValue(preds[v]);
        }
        printf(" ");
    }
    printf("]\n");
}
/**
* Performance/Evaluation statistics
*/
/**
 * Bundle of per-test statistics accumulators, labelled with a test name.
 * The name pointer is not owned (expected to be a string literal).
 */
struct Stats {
    const char *name;         // label printed in DisplayStats ("[name] finished.")
    Statistic rate;           // traversal-rate accumulator
    Statistic search_depth;   // search-depth accumulator
    Statistic redundant_work; // redundant-work accumulator
    Statistic duty;           // kernel-duty accumulator
    // Default-construct with no name.
    Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
    // Construct with a test label (not copied).
    Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
 * @brief Displays timing and correctness statistics
 *
 * @tparam MARK_PREDECESSORS
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] stats Reference to the Stats object defined in RunTests
 * @param[in] src Source node where BFS starts
 * @param[in] h_labels Host-side vector storing computed labels for validation
 * @param[in] graph Reference to the CSR graph we process on
 * @param[in] elapsed Total elapsed kernel running time (ms)
 * @param[in] search_depth Maximum search depth of the BFS algorithm
 * @param[in] total_queued Total elements queued in the BFS kernel run
 * @param[in] avg_duty Average duty of the BFS kernels
 */
template<
    bool MARK_PREDECESSORS,
    typename VertexId,
    typename Value,
    typename SizeT>
void DisplayStats(
    Stats &stats,
    VertexId src,
    VertexId *h_labels,
    const Csr<VertexId, Value, SizeT> &graph,
    double elapsed,
    VertexId search_depth,
    long long total_queued,
    double avg_duty)
{
    // Count visited nodes (label != -1) and the edges incident to them.
    SizeT edges_visited = 0;
    SizeT nodes_visited = 0;
    for (VertexId i = 0; i < graph.nodes; ++i) {
        if (h_labels[i] > -1) {
            ++nodes_visited;
            edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i];
        }
    }
    // Measure duplicate edges put through the queue, as a percentage.
    // Fix: also require edges_visited > 0, otherwise the division below
    // is undefined (e.g., work queued but no visited edge was counted).
    double redundant_work = 0.0;
    if (total_queued > 0 && edges_visited > 0) {
        redundant_work =
            ((double) total_queued - edges_visited) / edges_visited;
    }
    redundant_work *= 100;
    // Display test name
    printf("[%s] finished. ", stats.name);
    // Display statistics
    if (nodes_visited < 5) {
        printf("Fewer than 5 vertices visited.\n");
    } else {
        // Millions of traversed edges per second.
        double m_teps = (double) edges_visited / (elapsed * 1000.0);
        printf("\n elapsed: %.4f ms, rate: %.4f MiEdges/s", elapsed, m_teps);
        if (search_depth != 0) printf(", search_depth: %lld", (long long) search_depth);
        if (avg_duty != 0) {
            printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
        }
        printf("\n src: %lld, nodes_visited: %lld, edges_visited: %lld",
            (long long) src, (long long) nodes_visited, (long long) edges_visited);
        if (total_queued > 0) {
            printf(", total queued: %lld", total_queued);
        }
        if (redundant_work > 0) {
            printf(", redundant work: %.2f%%", redundant_work);
        }
        printf("\n");
    }
}
/******************************************************************************
* BFS Testing Routines
*****************************************************************************/
/**
 * @brief A simple CPU-based reference BFS ranking implementation.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] graph Reference to the CSR graph we process on
 * @param[out] source_path Host-side vector to store CPU computed labels for each node
 * @param[in] src Source node where BFS starts
 */
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void SimpleReferenceBfs(
    const Csr<VertexId, Value, SizeT> &graph,
    VertexId *source_path,
    VertexId src)
{
    // Initialize all distances to -1 ("unvisited").
    for (VertexId i = 0; i < graph.nodes; ++i)
    {
        source_path[i] = -1;
    }
    source_path[src] = 0;
    VertexId search_depth = 0;
    // Queue of discovered-but-unexpanded nodes.
    std::deque<VertexId> frontier;
    frontier.push_back(src);
    //
    // Perform BFS
    //
    CpuTimer cpu_timer;
    cpu_timer.Start();
    while (!frontier.empty())
    {
        // Dequeue node from frontier
        VertexId dequeued_node = frontier.front();
        frontier.pop_front();
        VertexId neighbor_dist = source_path[dequeued_node] + 1;
        // Locate adjacency list. Fix: use SizeT (not int) for edge offsets
        // so graphs whose edge count exceeds INT_MAX are handled correctly.
        SizeT edges_begin = graph.row_offsets[dequeued_node];
        SizeT edges_end = graph.row_offsets[dequeued_node + 1];
        for (SizeT edge = edges_begin; edge < edges_end; ++edge)
        {
            // Lookup neighbor and enqueue if undiscovered
            VertexId neighbor = graph.column_indices[edge];
            if (source_path[neighbor] == -1)
            {
                source_path[neighbor] = neighbor_dist;
                if (search_depth < neighbor_dist)
                {
                    search_depth = neighbor_dist;
                }
                frontier.push_back(neighbor);
            }
        }
    }
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    search_depth++;
    // Cast explicitly so %d stays valid regardless of VertexId's width.
    printf("CPU BFS finished in %lf msec. Search depth is: %d\n",
        elapsed, (int) search_depth);
}
/**
 * @brief Run BFS tests
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam MARK_PREDECESSORS
 * @tparam ENABLE_IDEMPOTENCE
 *
 * @param[in] graph Reference to the CSR graph we process on
 * @param[in] inv_graph Reference to the inverse CSC graph we process on
 * @param[in] src Source node where BFS starts
 * @param[in] max_grid_size Maximum CTA occupancy
 * @param[in] num_gpus Number of GPUs
 * @param[in] max_queue_sizing Scaling factor used in edge mapping
 * @param[in] alpha Tuning parameter for switching to reverse bfs
 * @param[in] beta Tuning parameter for switching back to normal bfs
 * @param[in] iterations Number of iterations for running the test
 * @param[in] context CudaContext pointer for moderngpu APIs
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool MARK_PREDECESSORS,
    bool ENABLE_IDEMPOTENCE>
void RunTests(
    const Csr<VertexId, Value, SizeT> &graph,
    const Csr<VertexId, Value, SizeT> &inv_graph,
    VertexId src,
    int max_grid_size,
    int num_gpus,
    double max_queue_sizing,
    float alpha, // Tuning parameter for switching to reverse bfs
    float beta, // Tuning parameter for switching back to normal bfs
    int iterations,
    CudaContext& context)
{
    typedef DOBFSProblem<
        VertexId,
        SizeT,
        Value,
        MARK_PREDECESSORS,
        ENABLE_IDEMPOTENCE,
        (MARK_PREDECESSORS && ENABLE_IDEMPOTENCE)> Problem; // does not use double buffer
    // Allocate host-side label arrays (for both reference and gpu-computed results)
    VertexId *reference_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *h_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *reference_check = (g_quick) ? NULL : reference_labels;
    VertexId *h_preds = NULL;
    if (MARK_PREDECESSORS)
    {
        h_preds = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    }
    // Allocate BFS enactor map
    DOBFSEnactor<INSTRUMENT> dobfs_enactor(g_verbose);
    // Allocate problem on GPU
    Problem *csr_problem = new Problem;
    util::GRError(csr_problem->Init(
        g_stream_from_host,
        g_undirected,
        graph,
        inv_graph,
        num_gpus,
        alpha,
        beta),
        "Problem DOBFS Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU BFS solution for source-distance
    //
    if (reference_check != NULL)
    {
        printf("Computing reference value ...\n");
        SimpleReferenceBfs(
            graph,
            reference_check,
            src);
        printf("\n");
    }
    // Fix: stack-allocate Stats instead of `new`/`delete`; the heap
    // allocation was unnecessary and would leak on early exit.
    Stats stats("GPU DOBFS");
    long long total_queued = 0;
    VertexId search_depth = 0;
    double avg_duty = 0.0;
    // Perform BFS; timing is averaged over `iterations` runs.
    GpuTimer gpu_timer;
    float elapsed = 0.0f;
    for (int iter=0; iter < iterations; ++iter)
    {
        util::GRError(
            csr_problem->Reset(
                src, dobfs_enactor.GetFrontierType(), max_queue_sizing),
            "DOBFS Problem Data Reset Failed", __FILE__, __LINE__);
        gpu_timer.Start();
        util::GRError(
            dobfs_enactor.template Enact<Problem>(
                context, csr_problem, src, max_grid_size),
            "DOBFS Problem Enact Failed", __FILE__, __LINE__);
        gpu_timer.Stop();
        elapsed += gpu_timer.ElapsedMillis();
    }
    elapsed /= iterations;
    dobfs_enactor.GetStatistics(total_queued, search_depth, avg_duty);
    // Copy out results
    util::GRError(
        csr_problem->Extract(h_labels, h_preds),
        "DOBFS Problem Data Extraction Failed", __FILE__, __LINE__);
    // Verify the result (labels only) against the CPU reference.
    if (reference_check != NULL)
    {
        if (!MARK_PREDECESSORS)
        {
            printf("Validity: ");
            CompareResults(h_labels, reference_check, graph.nodes, true);
        }
    }
    // Display Solution
    DisplaySolution(
        h_labels, h_preds, graph.nodes, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE);
    DisplayStats<MARK_PREDECESSORS>(
        stats,
        src,
        h_labels,
        graph,
        elapsed,
        search_depth,
        total_queued,
        avg_duty);
    // Cleanup
    if (csr_problem) delete csr_problem;
    if (reference_labels) free(reference_labels);
    if (h_labels) free(h_labels);
    if (h_preds) free(h_preds);
    hipDeviceSynchronize();
}
/**
 * @brief RunTests entry
 *
 * Parses the command-line flags, then dispatches to the matching
 * compile-time instantiation of RunTests via a lookup table (the same
 * eight instantiations the old nested if/else ladder produced, with
 * far less duplication).
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] graph Reference to the CSR graph we process on
 * @param[in] inv_graph Reference to the inverse CSR graph we process on
 * @param[in] args Reference to the command line arguments
 * @param[in] context CudaContext pointer for moderngpu APIs
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT>
void RunTests(
    Csr<VertexId, Value, SizeT> &graph,
    Csr<VertexId, Value, SizeT> &inv_graph,
    CommandLineArgs &args,
    CudaContext& context)
{
    VertexId src = -1; // Use whatever the specified graph-type's default is
    std::string src_str;
    bool instrumented = 0; // Whether or not to collect instrumentation from kernels
    bool mark_pred = 0; // Whether or not to mark src-distance vs. parent vertices
    bool idempotence = 1; // Whether or not to enable idempotence operation
    int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
    int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
    double max_queue_sizing = 1.0; // Maximum size scaling factor for work queues
    int iterations = 1; // Number of runs
    g_quick = false; // Whether or not to skip reference validation
    instrumented = args.CheckCmdLineFlag("instrumented");
    // Resolve the source vertex: 0 (default), random, highest-degree,
    // or an explicit vertex id.
    args.GetCmdLineArgument("src", src_str);
    if (src_str.empty())
    {
        src = 0;
    }
    else if (src_str.compare("randomize") == 0)
    {
        src = graphio::RandomNode(graph.nodes);
    }
    else if (src_str.compare("largestdegree") == 0)
    {
        int temp;
        src = graph.GetNodeWithHighestDegree(temp);
    }
    else
    {
        args.GetCmdLineArgument("src", src);
    }
    mark_pred = args.CheckCmdLineFlag("mark-pred");
    g_verbose = args.CheckCmdLineFlag("v");
    g_quick = args.CheckCmdLineFlag("quick");
    args.GetCmdLineArgument("iteration-num", iterations);
    args.GetCmdLineArgument("grid-size", max_grid_size);
    args.GetCmdLineArgument("idempotence", idempotence);
    args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
    args.GetCmdLineArgument("alpha", g_alpha);
    args.GetCmdLineArgument("beta", g_beta);
    // Unset (zero) tuning parameters fall back to the defaults.
    if (g_alpha == 0.0f) g_alpha = 12.0f;
    if (g_beta == 0.0f) g_beta = 6.0f;
    // Dispatch table over the three boolean template parameters, indexed
    // as [INSTRUMENT][MARK_PREDECESSORS][ENABLE_IDEMPOTENCE].
    typedef void (*TestFunc)(
        const Csr<VertexId, Value, SizeT>&,
        const Csr<VertexId, Value, SizeT>&,
        VertexId, int, int, double, float, float, int, CudaContext&);
    static const TestFunc dispatch[2][2][2] = {
        { { RunTests<VertexId, Value, SizeT, false, false, false>,
            RunTests<VertexId, Value, SizeT, false, false, true> },
          { RunTests<VertexId, Value, SizeT, false, true, false>,
            RunTests<VertexId, Value, SizeT, false, true, true> } },
        { { RunTests<VertexId, Value, SizeT, true, false, false>,
            RunTests<VertexId, Value, SizeT, true, false, true> },
          { RunTests<VertexId, Value, SizeT, true, true, false>,
            RunTests<VertexId, Value, SizeT, true, true, true> } }
    };
    dispatch[instrumented ? 1 : 0][mark_pred ? 1 : 0][idempotence ? 1 : 0](
        graph, inv_graph, src, max_grid_size, num_gpus, max_queue_sizing,
        g_alpha, g_beta, iterations, context);
}
/******************************************************************************
* Main
******************************************************************************/
/**
 * @brief Program entry point: parses arguments, builds the CSR graph
 * (plus the edge-reversed graph for directed inputs), and runs the
 * DOBFS tests. Returns 0 on success, 1 on usage/parse errors.
 */
int main( int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    if ((argc < 2) || (args.CheckCmdLineFlag("help")))
    {
        Usage();
        return 1;
    }
    //DeviceInit(args);
    //hipSetDeviceFlags(hipDeviceMapHost);
    int dev = 0;
    args.GetCmdLineArgument("device", dev);
    ContextPtr context = mgpu::CreateCudaDevice(dev);
    //srand(0); // Presently deterministic
    //srand(time(NULL));
    // Parse graph-construction params
    g_undirected = args.CheckCmdLineFlag("undirected");
    std::string graph_type = argv[1];
    int flags = args.ParsedArgc();
    // Positional (non-flag) arguments after the program name.
    int graph_args = argc - flags - 1;
    if (graph_args < 1)
    {
        Usage();
        return 1;
    }
    //
    // Construct graph and perform search(es)
    //
    if (graph_type == "market")
    {
        // Matrix-market coordinate-formatted graph file
        typedef int VertexId; // Use as the node identifier
        typedef int Value; // Use as the value type
        typedef int SizeT; // Use as the graph size type
        Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
        Csr<VertexId, Value, SizeT> inv_csr(false);
        if (graph_args < 1) { Usage(); return 1; }
        // NULL filename means the graph is read from stdin.
        char *market_filename = (graph_args == 2) ? argv[2] : NULL;
        if (graphio::BuildMarketGraph<false>(
            market_filename,
            csr,
            g_undirected,
            false) != 0)
        {
            return 1;
        }
        // For directed graphs also build the edge-reversed CSR, which the
        // direction-optimized BFS uses for its reverse-BFS phase.
        if (!g_undirected)
        {
            if (graphio::BuildMarketGraph<false>(
                market_filename,
                inv_csr,
                g_undirected,
                true) != 0)
            {
                return 1;
            }
        }
        csr.PrintHistogram();
        if (!g_undirected)
        {
            // Run tests
            RunTests(csr, inv_csr, args, *context);
        }
        else
        {
            // Undirected: the graph is its own inverse.
            RunTests(csr, csr, args, *context);
        }
    }
    else
    {
        fprintf(stderr, "Unspecified graph type\n");
        return 1;
    }
    return 0;
}
| 4fae7c8242b256793c6972e18a4d96d336270815.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// DOBFS includes
#include <gunrock/app/dobfs/dobfs_enactor.cuh>
#include <gunrock/app/dobfs/dobfs_problem.cuh>
#include <gunrock/app/dobfs/dobfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
using namespace gunrock::app::dobfs;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose; // print verbose per-iteration debug info (--v)
bool g_undirected; // treat the input graph as undirected (--undirected)
bool g_quick; // skip the CPU reference validation (--quick)
bool g_stream_from_host; // passed to Problem::Init; never set in this file — TODO confirm intended default (false)
float g_alpha; // tuning parameter for switching to reverse bfs (--alpha)
float g_beta; // tuning parameter for switching back to normal bfs (--beta)
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
/**
 * @brief Prints the command-line usage summary for this test driver.
 */
void Usage()
{
    printf (
        " test_dobfs <graph type> <graph type args> [--device=<device_index>]\n"
        " [--src=<source_index>] [--instrumented] [--idempotence=<0|1>] [--v]\n"
        " [--undirected] [--iteration-num=<num>] [--quick=<0|1>] [--mark-pred]\n"
        " [--queue-sizing=<scale factor>]\n"
        "\n"
        "Graph types and args:\n"
        " market <file>\n"
        " Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n"
        " edges from stdin (or from the optionally-specified file).\n"
        " --device=<device_index> Set GPU device for running the test. [Default: 0].\n"
        " --undirected Treat the graph as undirected (symmetric).\n"
        " --idempotence=<0 or 1> Enable: 1, Disable: 0 [Default: Enable].\n"
        " --instrumented Keep kernels statics [Default: Disable].\n"
        " total_queued, search_depth and barrier duty\n"
        " (a relative indicator of load imbalance.)\n"
        " --src=<source vertex id> Begins BFS from the source [Default: 0].\n"
        " If randomize: from a random source vertex.\n"
        " If largestdegree: from largest degree vertex.\n"
        " --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n"
        " --mark-pred Keep both label info and predecessor info.\n"
        " --queue-sizing=<factor> Allocates a frontier queue sized at: \n"
        " (graph-edges * <scale factor>). [Default: 1.0]\n"
        " --v Print verbose per iteration debug info.\n"
        " --iteration-num=<number> Number of runs to perform the test [Default: 1].\n"
    );
}
/**
 * @brief Prints the first (up to 40) BFS labels computed on the GPU.
 *
 * @param[in] source_path Search depth from the source for each node.
 * @param[in] preds Predecessor node id for each node.
 * @param[in] nodes Number of nodes in the graph.
 * @param[in] MARK_PREDECESSORS Whether to show predecessor of each node.
 * @param[in] ENABLE_IDEMPOTENCE Whether idempotence mode is enabled.
 */
template<typename VertexId, typename SizeT>
void DisplaySolution (VertexId *source_path,
                      VertexId *preds,
                      SizeT nodes,
                      bool MARK_PREDECESSORS,
                      bool ENABLE_IDEMPOTENCE)
{
    // Cap the dump at the first 40 labels to keep output readable.
    SizeT display_count = (nodes > 40) ? 40 : nodes;
    printf("\nFirst %d labels of the GPU result.\n", display_count);
    // Predecessors are only printed when marked and idempotence is off.
    bool show_preds = MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE;
    printf("[");
    for (VertexId v = 0; v < display_count; ++v) {
        PrintValue(v);
        printf(":");
        PrintValue(source_path[v]);
        if (show_preds) {
            printf(",");
            PrintValue(preds[v]);
        }
        printf(" ");
    }
    printf("]\n");
}
/**
 * Performance/Evaluation statistics.
 *
 * NOTE(review): the Statistic members are default-constructed but never
 * updated anywhere in this file — possibly legacy; only `name` is read
 * (by DisplayStats). Confirm before removing.
 */
struct Stats {
    const char *name; // label printed with the statistics (not owned)
    Statistic rate; // throughput statistic (unused here)
    Statistic search_depth; // search-depth statistic (unused here)
    Statistic redundant_work; // redundant-work statistic (unused here)
    Statistic duty; // kernel-duty statistic (unused here)
    Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
    Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
 * @brief Displays timing and correctness statistics
 *
 * @tparam MARK_PREDECESSORS
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] stats Reference to the Stats object defined in RunTests
 * @param[in] src Source node where BFS starts
 * @param[in] h_labels Host-side vector storing computed labels for validation
 * @param[in] graph Reference to the CSR graph we process on
 * @param[in] elapsed Total elapsed kernel running time (ms)
 * @param[in] search_depth Maximum search depth of the BFS algorithm
 * @param[in] total_queued Total elements queued in the BFS kernel run
 * @param[in] avg_duty Average duty of the BFS kernels
 */
template<
    bool MARK_PREDECESSORS,
    typename VertexId,
    typename Value,
    typename SizeT>
void DisplayStats(
    Stats &stats,
    VertexId src,
    VertexId *h_labels,
    const Csr<VertexId, Value, SizeT> &graph,
    double elapsed,
    VertexId search_depth,
    long long total_queued,
    double avg_duty)
{
    // Count visited nodes (label != -1) and the edges incident to them.
    SizeT edges_visited = 0;
    SizeT nodes_visited = 0;
    for (VertexId i = 0; i < graph.nodes; ++i) {
        if (h_labels[i] > -1) {
            ++nodes_visited;
            edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i];
        }
    }
    // Measure duplicate edges put through the queue, as a percentage.
    // Fix: also require edges_visited > 0, otherwise the division below
    // is undefined (e.g., work queued but no visited edge was counted).
    double redundant_work = 0.0;
    if (total_queued > 0 && edges_visited > 0) {
        redundant_work =
            ((double) total_queued - edges_visited) / edges_visited;
    }
    redundant_work *= 100;
    // Display test name
    printf("[%s] finished. ", stats.name);
    // Display statistics
    if (nodes_visited < 5) {
        printf("Fewer than 5 vertices visited.\n");
    } else {
        // Millions of traversed edges per second.
        double m_teps = (double) edges_visited / (elapsed * 1000.0);
        printf("\n elapsed: %.4f ms, rate: %.4f MiEdges/s", elapsed, m_teps);
        if (search_depth != 0) printf(", search_depth: %lld", (long long) search_depth);
        if (avg_duty != 0) {
            printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
        }
        printf("\n src: %lld, nodes_visited: %lld, edges_visited: %lld",
            (long long) src, (long long) nodes_visited, (long long) edges_visited);
        if (total_queued > 0) {
            printf(", total queued: %lld", total_queued);
        }
        if (redundant_work > 0) {
            printf(", redundant work: %.2f%%", redundant_work);
        }
        printf("\n");
    }
}
/******************************************************************************
* BFS Testing Routines
*****************************************************************************/
/**
 * @brief A simple CPU-based reference BFS ranking implementation.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] graph Reference to the CSR graph we process on
 * @param[out] source_path Host-side vector to store CPU computed labels for each node
 * @param[in] src Source node where BFS starts
 */
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void SimpleReferenceBfs(
    const Csr<VertexId, Value, SizeT> &graph,
    VertexId *source_path,
    VertexId src)
{
    // Initialize all distances to -1 ("unvisited").
    for (VertexId i = 0; i < graph.nodes; ++i)
    {
        source_path[i] = -1;
    }
    source_path[src] = 0;
    VertexId search_depth = 0;
    // Queue of discovered-but-unexpanded nodes.
    std::deque<VertexId> frontier;
    frontier.push_back(src);
    //
    // Perform BFS
    //
    CpuTimer cpu_timer;
    cpu_timer.Start();
    while (!frontier.empty())
    {
        // Dequeue node from frontier
        VertexId dequeued_node = frontier.front();
        frontier.pop_front();
        VertexId neighbor_dist = source_path[dequeued_node] + 1;
        // Locate adjacency list. Fix: use SizeT (not int) for edge offsets
        // so graphs whose edge count exceeds INT_MAX are handled correctly.
        SizeT edges_begin = graph.row_offsets[dequeued_node];
        SizeT edges_end = graph.row_offsets[dequeued_node + 1];
        for (SizeT edge = edges_begin; edge < edges_end; ++edge)
        {
            // Lookup neighbor and enqueue if undiscovered
            VertexId neighbor = graph.column_indices[edge];
            if (source_path[neighbor] == -1)
            {
                source_path[neighbor] = neighbor_dist;
                if (search_depth < neighbor_dist)
                {
                    search_depth = neighbor_dist;
                }
                frontier.push_back(neighbor);
            }
        }
    }
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    search_depth++;
    // Cast explicitly so %d stays valid regardless of VertexId's width.
    printf("CPU BFS finished in %lf msec. Search depth is: %d\n",
        elapsed, (int) search_depth);
}
/**
 * @brief Run BFS tests
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam MARK_PREDECESSORS
 * @tparam ENABLE_IDEMPOTENCE
 *
 * @param[in] graph Reference to the CSR graph we process on
 * @param[in] inv_graph Reference to the inverse CSC graph we process on
 * @param[in] src Source node where BFS starts
 * @param[in] max_grid_size Maximum CTA occupancy
 * @param[in] num_gpus Number of GPUs
 * @param[in] max_queue_sizing Scaling factor used in edge mapping
 * @param[in] alpha Tuning parameter for switching to reverse bfs
 * @param[in] beta Tuning parameter for switching back to normal bfs
 * @param[in] iterations Number of iterations for running the test
 * @param[in] context CudaContext pointer for moderngpu APIs
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool MARK_PREDECESSORS,
    bool ENABLE_IDEMPOTENCE>
void RunTests(
    const Csr<VertexId, Value, SizeT> &graph,
    const Csr<VertexId, Value, SizeT> &inv_graph,
    VertexId src,
    int max_grid_size,
    int num_gpus,
    double max_queue_sizing,
    float alpha, // Tuning parameter for switching to reverse bfs
    float beta, // Tuning parameter for switching back to normal bfs
    int iterations,
    CudaContext& context)
{
    typedef DOBFSProblem<
        VertexId,
        SizeT,
        Value,
        MARK_PREDECESSORS,
        ENABLE_IDEMPOTENCE,
        (MARK_PREDECESSORS && ENABLE_IDEMPOTENCE)> Problem; // does not use double buffer
    // Allocate host-side label arrays (for both reference and gpu-computed results)
    VertexId *reference_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *h_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    VertexId *reference_check = (g_quick) ? NULL : reference_labels;
    VertexId *h_preds = NULL;
    if (MARK_PREDECESSORS)
    {
        h_preds = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
    }
    // Allocate BFS enactor map
    DOBFSEnactor<INSTRUMENT> dobfs_enactor(g_verbose);
    // Allocate problem on GPU
    Problem *csr_problem = new Problem;
    util::GRError(csr_problem->Init(
        g_stream_from_host,
        g_undirected,
        graph,
        inv_graph,
        num_gpus,
        alpha,
        beta),
        "Problem DOBFS Initialization Failed", __FILE__, __LINE__);
    //
    // Compute reference CPU BFS solution for source-distance
    //
    if (reference_check != NULL)
    {
        printf("Computing reference value ...\n");
        SimpleReferenceBfs(
            graph,
            reference_check,
            src);
        printf("\n");
    }
    // Fix: stack-allocate Stats instead of `new`/`delete`; the heap
    // allocation was unnecessary and would leak on early exit.
    Stats stats("GPU DOBFS");
    long long total_queued = 0;
    VertexId search_depth = 0;
    double avg_duty = 0.0;
    // Perform BFS; timing is averaged over `iterations` runs.
    GpuTimer gpu_timer;
    float elapsed = 0.0f;
    for (int iter=0; iter < iterations; ++iter)
    {
        util::GRError(
            csr_problem->Reset(
                src, dobfs_enactor.GetFrontierType(), max_queue_sizing),
            "DOBFS Problem Data Reset Failed", __FILE__, __LINE__);
        gpu_timer.Start();
        util::GRError(
            dobfs_enactor.template Enact<Problem>(
                context, csr_problem, src, max_grid_size),
            "DOBFS Problem Enact Failed", __FILE__, __LINE__);
        gpu_timer.Stop();
        elapsed += gpu_timer.ElapsedMillis();
    }
    elapsed /= iterations;
    dobfs_enactor.GetStatistics(total_queued, search_depth, avg_duty);
    // Copy out results
    util::GRError(
        csr_problem->Extract(h_labels, h_preds),
        "DOBFS Problem Data Extraction Failed", __FILE__, __LINE__);
    // Verify the result (labels only) against the CPU reference.
    if (reference_check != NULL)
    {
        if (!MARK_PREDECESSORS)
        {
            printf("Validity: ");
            CompareResults(h_labels, reference_check, graph.nodes, true);
        }
    }
    // Display Solution
    DisplaySolution(
        h_labels, h_preds, graph.nodes, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE);
    DisplayStats<MARK_PREDECESSORS>(
        stats,
        src,
        h_labels,
        graph,
        elapsed,
        search_depth,
        total_queued,
        avg_duty);
    // Cleanup
    if (csr_problem) delete csr_problem;
    if (reference_labels) free(reference_labels);
    if (h_labels) free(h_labels);
    if (h_preds) free(h_preds);
    cudaDeviceSynchronize();
}
/**
 * @brief RunTests entry
 *
 * Parses the command-line flags, then dispatches to the matching
 * compile-time instantiation of RunTests via a lookup table (the same
 * eight instantiations the old nested if/else ladder produced, with
 * far less duplication).
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in] graph Reference to the CSR graph we process on
 * @param[in] inv_graph Reference to the inverse CSR graph we process on
 * @param[in] args Reference to the command line arguments
 * @param[in] context CudaContext pointer for moderngpu APIs
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT>
void RunTests(
    Csr<VertexId, Value, SizeT> &graph,
    Csr<VertexId, Value, SizeT> &inv_graph,
    CommandLineArgs &args,
    CudaContext& context)
{
    VertexId src = -1; // Use whatever the specified graph-type's default is
    std::string src_str;
    bool instrumented = 0; // Whether or not to collect instrumentation from kernels
    bool mark_pred = 0; // Whether or not to mark src-distance vs. parent vertices
    bool idempotence = 1; // Whether or not to enable idempotence operation
    int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
    int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
    double max_queue_sizing = 1.0; // Maximum size scaling factor for work queues
    int iterations = 1; // Number of runs
    g_quick = false; // Whether or not to skip reference validation
    instrumented = args.CheckCmdLineFlag("instrumented");
    // Resolve the source vertex: 0 (default), random, highest-degree,
    // or an explicit vertex id.
    args.GetCmdLineArgument("src", src_str);
    if (src_str.empty())
    {
        src = 0;
    }
    else if (src_str.compare("randomize") == 0)
    {
        src = graphio::RandomNode(graph.nodes);
    }
    else if (src_str.compare("largestdegree") == 0)
    {
        int temp;
        src = graph.GetNodeWithHighestDegree(temp);
    }
    else
    {
        args.GetCmdLineArgument("src", src);
    }
    mark_pred = args.CheckCmdLineFlag("mark-pred");
    g_verbose = args.CheckCmdLineFlag("v");
    g_quick = args.CheckCmdLineFlag("quick");
    args.GetCmdLineArgument("iteration-num", iterations);
    args.GetCmdLineArgument("grid-size", max_grid_size);
    args.GetCmdLineArgument("idempotence", idempotence);
    args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
    args.GetCmdLineArgument("alpha", g_alpha);
    args.GetCmdLineArgument("beta", g_beta);
    // Unset (zero) tuning parameters fall back to the defaults.
    if (g_alpha == 0.0f) g_alpha = 12.0f;
    if (g_beta == 0.0f) g_beta = 6.0f;
    // Dispatch table over the three boolean template parameters, indexed
    // as [INSTRUMENT][MARK_PREDECESSORS][ENABLE_IDEMPOTENCE].
    typedef void (*TestFunc)(
        const Csr<VertexId, Value, SizeT>&,
        const Csr<VertexId, Value, SizeT>&,
        VertexId, int, int, double, float, float, int, CudaContext&);
    static const TestFunc dispatch[2][2][2] = {
        { { RunTests<VertexId, Value, SizeT, false, false, false>,
            RunTests<VertexId, Value, SizeT, false, false, true> },
          { RunTests<VertexId, Value, SizeT, false, true, false>,
            RunTests<VertexId, Value, SizeT, false, true, true> } },
        { { RunTests<VertexId, Value, SizeT, true, false, false>,
            RunTests<VertexId, Value, SizeT, true, false, true> },
          { RunTests<VertexId, Value, SizeT, true, true, false>,
            RunTests<VertexId, Value, SizeT, true, true, true> } }
    };
    dispatch[instrumented ? 1 : 0][mark_pred ? 1 : 0][idempotence ? 1 : 0](
        graph, inv_graph, src, max_grid_size, num_gpus, max_queue_sizing,
        g_alpha, g_beta, iterations, context);
}
/******************************************************************************
* Main
******************************************************************************/
/**
 * @brief Program entry point: parses arguments, builds the CSR graph
 * (plus the edge-reversed graph for directed inputs), and runs the
 * DOBFS tests. Returns 0 on success, 1 on usage/parse errors.
 */
int main( int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    if ((argc < 2) || (args.CheckCmdLineFlag("help")))
    {
        Usage();
        return 1;
    }
    //DeviceInit(args);
    //cudaSetDeviceFlags(cudaDeviceMapHost);
    int dev = 0;
    args.GetCmdLineArgument("device", dev);
    ContextPtr context = mgpu::CreateCudaDevice(dev);
    //srand(0); // Presently deterministic
    //srand(time(NULL));
    // Parse graph-construction params
    g_undirected = args.CheckCmdLineFlag("undirected");
    std::string graph_type = argv[1];
    int flags = args.ParsedArgc();
    // Positional (non-flag) arguments after the program name.
    int graph_args = argc - flags - 1;
    if (graph_args < 1)
    {
        Usage();
        return 1;
    }
    //
    // Construct graph and perform search(es)
    //
    if (graph_type == "market")
    {
        // Matrix-market coordinate-formatted graph file
        typedef int VertexId; // Use as the node identifier
        typedef int Value; // Use as the value type
        typedef int SizeT; // Use as the graph size type
        Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
        Csr<VertexId, Value, SizeT> inv_csr(false);
        if (graph_args < 1) { Usage(); return 1; }
        // NULL filename means the graph is read from stdin.
        char *market_filename = (graph_args == 2) ? argv[2] : NULL;
        if (graphio::BuildMarketGraph<false>(
            market_filename,
            csr,
            g_undirected,
            false) != 0)
        {
            return 1;
        }
        // For directed graphs also build the edge-reversed CSR, which the
        // direction-optimized BFS uses for its reverse-BFS phase.
        if (!g_undirected)
        {
            if (graphio::BuildMarketGraph<false>(
                market_filename,
                inv_csr,
                g_undirected,
                true) != 0)
            {
                return 1;
            }
        }
        csr.PrintHistogram();
        if (!g_undirected)
        {
            // Run tests
            RunTests(csr, inv_csr, args, *context);
        }
        else
        {
            // Undirected: the graph is its own inverse.
            RunTests(csr, csr, args, *context);
        }
    }
    else
    {
        fprintf(stderr, "Unspecified graph type\n");
        return 1;
    }
    return 0;
}
|
5df6f37a3c869a8e61f257592644f6270d63734c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief unit tests for NHWC tensor layout
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/device_memory.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace layout {
/// Host-side check of cutlass::layout::TensorNHWC: offset mapping,
/// reported stride, and capacity, using deliberately padded strides.
void test_nhwc_layout(int n_size, int h_size, int w_size, int c_size) {
    // Pad every dimension so the test also covers non-packed tensors.
    int ldc = c_size + 1;
    int ldw = ldc * (w_size + 2);
    int ldh = ldw * (h_size + 3);
    typedef cutlass::layout::TensorNHWC Tensor;
    Tensor::Stride tensor_stride({ ldc, ldw, ldh });
    Tensor layout(tensor_stride);
    // Every (n, h, w, c) coordinate must map to the expected linear offset.
    for (int n = 0; n < n_size; n++) {
        for (int h = 0; h < h_size; h++) {
            for (int w = 0; w < w_size; w++) {
                for (int c = 0; c < c_size; c++) {
                    cutlass::Tensor4DCoord coord(n, h, w, c);
                    auto offset = layout(coord);
                    decltype(offset) expected_offset =
                        c + w * ldc + h * ldw + n * ldh;
                    EXPECT_EQ(offset, expected_offset);
                }
            }
        }
    }
    // The stride reported by the layout must match what it was built with.
    auto reported_stride = layout.stride();
    EXPECT_EQ(reported_stride, tensor_stride);
    // Capacity equals the (padded) batch stride times the batch count.
    auto capacity = layout.capacity(
        cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
    decltype(capacity) expected_capacity = ldh * n_size;
    EXPECT_EQ(capacity, expected_capacity);
}
// Device kernel: for a PACKED NHWC layout, checks that inverse() round-trips.
// Launch shape: one block of c_size threads; threadIdx.x is the channel index.
// For every (n, h, w, threadIdx.x) it computes offset = layout(coord), then
// writes layout(inverse(offset)) at that offset; the host expects the stored
// value to equal the offset itself.
__global__ void test_nhwc_inverse(
int *output, int n_size, int h_size, int w_size, int c_size) {
// Packed strides: contiguous channels, then W, then H.
int ldc = c_size;
int ldw = ldc * w_size;
int ldh = ldw * h_size;
typedef cutlass::layout::TensorNHWC Tensor;
Tensor::Stride tensor_stride({ ldc, ldw, ldh });
Tensor tensor_nhw_packed_c(tensor_stride);
// Each thread walks the whole N/H/W volume for its own channel.
for (int n_idx = 0; n_idx < n_size; n_idx++) {
for (int p_idx = 0; p_idx < h_size; p_idx++) {
for (int q_idx = 0; q_idx < w_size; q_idx++) {
cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, threadIdx.x);
int ptr_offset = tensor_nhw_packed_c(tensor_coord);
// Round-trip: offset -> coordinate -> offset must be the identity.
cutlass::Tensor4DCoord inv_coord = tensor_nhw_packed_c.inverse(ptr_offset);
output[ptr_offset] = tensor_nhw_packed_c(inv_coord);
}
}
}
}
class TestTensorNHWC {
public:
//
// Data members
//
//
// Methods
//
/// Ctor
TestTensorNHWC() {
}
/// Runs the test
void run(int n_size, int h_size, int w_size, int c_size) {
size_t size = n_size * h_size * w_size * c_size;
/// Device memory containing output
cutlass::device_memory::allocation< int > output(size);
int *output_host = (int *)malloc(sizeof(int) * size);
dim3 grid(1,1);
dim3 block(c_size, 1, 1);
hipLaunchKernelGGL(( test::layout::test_nhwc_inverse), dim3(grid), dim3(block) , 0, 0, output.get(),
n_size, h_size, w_size, c_size);
hipError_t result = hipDeviceSynchronize();
ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result);
//
// Verify output
//
cutlass::device_memory::copy_to_host(output_host, output.get(), size);
result = hipGetLastError();
ASSERT_EQ(result, hipSuccess) << "CUDA error: " << hipGetErrorString(result);
for (int n_idx = 0; n_idx < n_size; n_idx++) {
for (int p_idx = 0; p_idx < h_size; p_idx++) {
for (int q_idx = 0; q_idx < w_size; q_idx++) {
for (int c_idx = 0; c_idx < c_size; c_idx++) {
int reference_offset = c_idx +
q_idx * c_size +
p_idx * (c_size * w_size) +
n_idx * (c_size * w_size * h_size);
EXPECT_EQ(output_host[reference_offset], reference_offset);
}
}
}
}
}
};
} // namespace layout
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
// Each case runs both checks: test_nhwc_layout (padded-stride offsets,
// stride, capacity) and TestTensorNHWC::run (device-side inverse()).
// Case: N=1, H=16, W=8, C=32.
TEST(Layout_TensorNHWC, NHWC_1_16_8_32) {
int n_size = 1;
int h_size = 16;
int w_size = 8;
int c_size = 32;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
// Case: N=2 — exercises a non-unit batch dimension.
TEST(Layout_TensorNHWC, NHWC_2_16_8_32) {
int n_size = 2;
int h_size = 16;
int w_size = 8;
int c_size = 32;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
// Case: C=128 — wider channel extent (also widens the kernel's block).
TEST(Layout_TensorNHWC, NHWC_2_16_8_128) {
int n_size = 2;
int h_size = 16;
int w_size = 8;
int c_size = 128;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
// Case: H < W — checks that the H/W roles are not accidentally swapped.
TEST(Layout_TensorNHWC, NHWC_4_8_16_128) {
int n_size = 4;
int h_size = 8;
int w_size = 16;
int c_size = 128;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 5df6f37a3c869a8e61f257592644f6270d63734c.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief unit tests for NHWC tensor layout
*/
#include "../common/cutlass_unit_test.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/device_memory.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace layout {
/// Verifies TensorNHWC pointer arithmetic against hand-computed offsets,
/// then checks stride() round-trip and capacity() for a non-packed layout.
void test_nhwc_layout(int n_size, int h_size, int w_size, int c_size) {

  // Deliberately padded strides so the test exercises non-contiguous data.
  int stride_c = c_size + 1;
  int stride_w = stride_c * (w_size + 2);
  int stride_h = stride_w * (h_size + 3);

  typedef cutlass::layout::TensorNHWC Tensor;
  Tensor::Stride tensor_stride({ stride_c, stride_w, stride_h });
  Tensor nhwc(tensor_stride);

  // Every (n, h, w, c) coordinate must map to the reference linear offset
  // implied by the strides chosen above.
  for (int n = 0; n < n_size; n++) {
    for (int h = 0; h < h_size; h++) {
      for (int w = 0; w < w_size; w++) {
        for (int c = 0; c < c_size; c++) {
          cutlass::Tensor4DCoord coord(n, h, w, c);
          auto offset = nhwc(coord);
          decltype(offset) expected =
              c + w * stride_c + h * stride_w + n * stride_h;
          EXPECT_EQ(offset, expected);
        }
      }
    }
  }

  // Stride must round-trip unchanged.
  EXPECT_EQ(nhwc.stride(), tensor_stride);

  // Capacity must cover n_size full N-slices of the padded layout.
  auto capacity = nhwc.capacity(
      cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  decltype(capacity) reference_capacity = stride_h * n_size;
  EXPECT_EQ(capacity, reference_capacity);
}
// Device kernel: for a PACKED NHWC layout, checks that inverse() round-trips.
// Launch shape: one block of c_size threads; threadIdx.x is the channel index.
// For every (n, h, w, threadIdx.x) it computes offset = layout(coord), then
// writes layout(inverse(offset)) at that offset; the host expects the stored
// value to equal the offset itself.
__global__ void test_nhwc_inverse(
int *output, int n_size, int h_size, int w_size, int c_size) {
// Packed strides: contiguous channels, then W, then H.
int ldc = c_size;
int ldw = ldc * w_size;
int ldh = ldw * h_size;
typedef cutlass::layout::TensorNHWC Tensor;
Tensor::Stride tensor_stride({ ldc, ldw, ldh });
Tensor tensor_nhw_packed_c(tensor_stride);
// Each thread walks the whole N/H/W volume for its own channel.
for (int n_idx = 0; n_idx < n_size; n_idx++) {
for (int p_idx = 0; p_idx < h_size; p_idx++) {
for (int q_idx = 0; q_idx < w_size; q_idx++) {
cutlass::Tensor4DCoord tensor_coord(n_idx, p_idx, q_idx, threadIdx.x);
int ptr_offset = tensor_nhw_packed_c(tensor_coord);
// Round-trip: offset -> coordinate -> offset must be the identity.
cutlass::Tensor4DCoord inv_coord = tensor_nhw_packed_c.inverse(ptr_offset);
output[ptr_offset] = tensor_nhw_packed_c(inv_coord);
}
}
}
}
/// Host-side driver: launches test_nhwc_inverse and verifies that
/// layout(inverse(offset)) reproduces every packed NHWC offset.
class TestTensorNHWC {
public:

  /// Ctor
  TestTensorNHWC() {
  }

  /// Runs the test with one block of c_size threads (thread x == channel).
  void run(int n_size, int h_size, int w_size, int c_size) {

    size_t size = n_size * h_size * w_size * c_size;

    /// Device memory containing output
    cutlass::device_memory::allocation< int > output(size);
    int *output_host = (int *)malloc(sizeof(int) * size);
    // Fix: fail loudly instead of dereferencing a NULL buffer later.
    ASSERT_NE(output_host, nullptr) << "host allocation failed";

    dim3 grid(1,1);
    dim3 block(c_size, 1, 1);

    test::layout::test_nhwc_inverse<<< grid, block >>>(output.get(),
        n_size, h_size, w_size, c_size);

    cudaError_t result = cudaDeviceSynchronize();
    ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);

    //
    // Verify output
    //
    cutlass::device_memory::copy_to_host(output_host, output.get(), size);
    result = cudaGetLastError();
    ASSERT_EQ(result, cudaSuccess) << "CUDA error: " << cudaGetErrorString(result);

    for (int n_idx = 0; n_idx < n_size; n_idx++) {
      for (int p_idx = 0; p_idx < h_size; p_idx++) {
        for (int q_idx = 0; q_idx < w_size; q_idx++) {
          for (int c_idx = 0; c_idx < c_size; c_idx++) {
            // Packed NHWC linear offset of (n, p, q, c).
            int reference_offset = c_idx +
                q_idx * c_size +
                p_idx * (c_size * w_size) +
                n_idx * (c_size * w_size * h_size);
            EXPECT_EQ(output_host[reference_offset], reference_offset);
          }
        }
      }
    }

    // Fix: the original leaked this buffer on every run() call.
    free(output_host);
  }
};
} // namespace layout
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
// Each case runs both checks: test_nhwc_layout (padded-stride offsets,
// stride, capacity) and TestTensorNHWC::run (device-side inverse()).
// Case: N=1, H=16, W=8, C=32.
TEST(Layout_TensorNHWC, NHWC_1_16_8_32) {
int n_size = 1;
int h_size = 16;
int w_size = 8;
int c_size = 32;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
// Case: N=2 — exercises a non-unit batch dimension.
TEST(Layout_TensorNHWC, NHWC_2_16_8_32) {
int n_size = 2;
int h_size = 16;
int w_size = 8;
int c_size = 32;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
// Case: C=128 — wider channel extent (also widens the kernel's block).
TEST(Layout_TensorNHWC, NHWC_2_16_8_128) {
int n_size = 2;
int h_size = 16;
int w_size = 8;
int c_size = 128;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
// Case: H < W — checks that the H/W roles are not accidentally swapped.
TEST(Layout_TensorNHWC, NHWC_4_8_16_128) {
int n_size = 4;
int h_size = 8;
int w_size = 16;
int c_size = 128;
test::layout::test_nhwc_layout(n_size, h_size, w_size, c_size);
test::layout::TestTensorNHWC test_nhwc;
test_nhwc.run(n_size, h_size, w_size, c_size);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
13b6061db72c28323e273f842c396de9b89de5d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise copy of an nx-by-ny float array stored with row stride ny
// (element (i,j) at linear index i*ny + j). Despite the "Col" in the name,
// every in-bounds element is copied, not a single column.
__global__ void copyGlobalCol(float *out, float *in, const int nx, const int ny)
{
// Global 2D indices from a 2D grid of 2D blocks.
unsigned int i = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int j = threadIdx.y+blockDim.y*blockIdx.y;
// Guard: the grid may overhang the array extents.
if (i<nx && j<ny)
{
out[i*ny+j] = in[i*ny+j];
}
} | 13b6061db72c28323e273f842c396de9b89de5d2.cu | #include "includes.h"
// Element-wise copy of an nx-by-ny float array stored with row stride ny
// (element (i,j) at linear index i*ny + j). Despite the "Col" in the name,
// every in-bounds element is copied, not a single column.
__global__ void copyGlobalCol(float *out, float *in, const int nx, const int ny)
{
// Global 2D indices from a 2D grid of 2D blocks.
unsigned int i = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int j = threadIdx.y+blockDim.y*blockIdx.y;
// Guard: the grid may overhang the array extents.
if (i<nx && j<ny)
{
out[i*ny+j] = in[i*ny+j];
}
} |
b8f7f2ac5a238f07666e6ac0d41264170bfb2285.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//====================================================
// Towards Maximum GFLOPS
// main.cu : Main Routine
//----------------------------------------------------
// Rev.01 2019.05.11 M.Munetomo
//----------------------------------------------------
// Copyright (C) 2019 Munetomo Maruyama
//====================================================
#include <cinttypes>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#define DATA_SIZE (1024*1024)
#define ITERATION 65536
#define BLOCK_SIZE 1024
#define COEFF_A 0.4999
#define COEFF_B 1.2345
//-----------------
// Device Kernel
//-----------------
// FLOPS benchmark kernel: each thread owns one element of buf and applies
// the recurrence c = a*c + b ITERATION times (2 flops per iteration:
// one multiply, one add), then writes the result back to global memory.
__global__ void Device_Kernel(float *buf, const float a, const float b)
{
// 1D decomposition: one element per thread.
uint32_t index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= DATA_SIZE) return; // guard the grid tail
//
float c = buf[index]; // work on a register-resident copy
//
for (int i = 0; i < ITERATION; i++)
{
c = a * c + b;
}
buf[index] = c; // store the result back to global memory
}
//----------------------------------
// Check Error during CUDA Runtime
//----------------------------------
#define CHECK(func) \
{ \
const hipError_t error = func; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("Code:%d, Reason: %s\n", error, \
hipGetErrorString(error)); \
hipDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
//-----------------
// CPU Time
//-----------------
//-----------------------------------------
// Wall-clock time in seconds (gettimeofday)
//-----------------------------------------
double CPU_Second(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    // seconds plus microseconds converted to seconds
    return (double)now.tv_sec + 1.e-6 * (double)now.tv_usec;
}
//------------------------
// Calculate Giga FLOPS
//------------------------
//---------------------------------------------
// Convert a measured time (seconds) to GFLOP/s
// for one full Device_Kernel pass.
//---------------------------------------------
double GFLOPS(double sec)
{
    // 2 flops (multiply + add) per element per iteration.
    double total_ops = 2.0 * (double)ITERATION * (double)DATA_SIZE;
    return total_ops * 1.0e-9f / sec;
}
//----------------------------------
// Main Routine
//----------------------------------
//----------------------------------
// Main: fill a host buffer with random floats, upload it, run
// Device_Kernel once to warm up, time a second launch, then print
// a few results plus elapsed time and GFLOPS.
//----------------------------------
int main(void)
{
// Allocate Host Buffer
float *hBuf;
if ((hBuf = (float*)malloc(sizeof(float) * DATA_SIZE)) == NULL) exit(EXIT_FAILURE);
//
// Generate Random Data (uniform in roughly [-5.0, 5.0))
time_t t;
srand((unsigned int)time(&t));
for (uint32_t i = 0; i < DATA_SIZE; i++)
{
hBuf[i] = (float)((rand() % 10000) - 5000) / 1000.0f;
}
//
// Allocate Device Buffer and upload the inputs
float *dBuf;
CHECK(hipMalloc((float **) &dBuf, sizeof(float) * DATA_SIZE));
CHECK(hipMemcpy(dBuf, hBuf, sizeof(float) * DATA_SIZE, hipMemcpyHostToDevice));
//
// Grids and Blocks (1D, ceil-div so the whole buffer is covered)
dim3 block(BLOCK_SIZE);
dim3 grid(((DATA_SIZE) + block.x - 1) / block.x);
//
// Call Kernel (warm up; this launch is not timed)
hipLaunchKernelGGL(( Device_Kernel) , dim3(grid), dim3(block), 0, 0, dBuf, COEFF_A, COEFF_B);
CHECK(hipDeviceSynchronize());
//
// Call Kernel (measure; synchronize so the full run is timed)
double iStart = CPU_Second();
hipLaunchKernelGGL(( Device_Kernel) , dim3(grid), dim3(block), 0, 0, dBuf, COEFF_A, COEFF_B);
CHECK(hipDeviceSynchronize());
double iElaps = CPU_Second() - iStart;
//
// Display Result (first 10 elements only)
CHECK(hipMemcpy(hBuf, dBuf, sizeof(float) * DATA_SIZE, hipMemcpyDeviceToHost));
for (uint32_t i = 0; i < 10; i++)
{
printf("hBuf[%02d]=%8.4f\n", i, hBuf[i]);
}
printf("Time elapsed %lf sec (%lf GFLOPS)\n", iElaps, GFLOPS(iElaps));
//
// Finish
CHECK(hipFree(dBuf));
if (hBuf) free(hBuf);
//
// Return from this Program
return(EXIT_SUCCESS);
}
//====================================================
// End of Program
//====================================================
| b8f7f2ac5a238f07666e6ac0d41264170bfb2285.cu | //====================================================
// Towards Maximum GFLOPS
// main.cu : Main Routine
//----------------------------------------------------
// Rev.01 2019.05.11 M.Munetomo
//----------------------------------------------------
// Copyright (C) 2019 Munetomo Maruyama
//====================================================
#include <cinttypes>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#define DATA_SIZE (1024*1024)
#define ITERATION 65536
#define BLOCK_SIZE 1024
#define COEFF_A 0.4999
#define COEFF_B 1.2345
//-----------------
// Device Kernel
//-----------------
// FLOPS benchmark kernel: each thread owns one element of buf and applies
// the recurrence c = a*c + b ITERATION times (2 flops per iteration:
// one multiply, one add), then writes the result back to global memory.
__global__ void Device_Kernel(float *buf, const float a, const float b)
{
// 1D decomposition: one element per thread.
uint32_t index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= DATA_SIZE) return; // guard the grid tail
//
float c = buf[index]; // work on a register-resident copy
//
for (int i = 0; i < ITERATION; i++)
{
c = a * c + b;
}
buf[index] = c; // store the result back to global memory
}
//----------------------------------
// Check Error during CUDA Runtime
//----------------------------------
#define CHECK(func) \
{ \
const cudaError_t error = func; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("Code:%d, Reason: %s\n", error, \
cudaGetErrorString(error)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
//-----------------
// CPU Time
//-----------------
//-----------------------------------------
// Wall-clock time in seconds (gettimeofday)
//-----------------------------------------
double CPU_Second(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    // seconds plus microseconds converted to seconds
    return (double)now.tv_sec + 1.e-6 * (double)now.tv_usec;
}
//------------------------
// Calculate Giga FLOPS
//------------------------
//---------------------------------------------
// Convert a measured time (seconds) to GFLOP/s
// for one full Device_Kernel pass.
//---------------------------------------------
double GFLOPS(double sec)
{
    // 2 flops (multiply + add) per element per iteration.
    double total_ops = 2.0 * (double)ITERATION * (double)DATA_SIZE;
    return total_ops * 1.0e-9f / sec;
}
//----------------------------------
// Main Routine
//----------------------------------
//----------------------------------
// Main: fill a host buffer with random floats, upload it, run
// Device_Kernel once to warm up, time a second launch, then print
// a few results plus elapsed time and GFLOPS.
//----------------------------------
int main(void)
{
// Allocate Host Buffer
float *hBuf;
if ((hBuf = (float*)malloc(sizeof(float) * DATA_SIZE)) == NULL) exit(EXIT_FAILURE);
//
// Generate Random Data (uniform in roughly [-5.0, 5.0))
time_t t;
srand((unsigned int)time(&t));
for (uint32_t i = 0; i < DATA_SIZE; i++)
{
hBuf[i] = (float)((rand() % 10000) - 5000) / 1000.0f;
}
//
// Allocate Device Buffer and upload the inputs
float *dBuf;
CHECK(cudaMalloc((float **) &dBuf, sizeof(float) * DATA_SIZE));
CHECK(cudaMemcpy(dBuf, hBuf, sizeof(float) * DATA_SIZE, cudaMemcpyHostToDevice));
//
// Grids and Blocks (1D, ceil-div so the whole buffer is covered)
dim3 block(BLOCK_SIZE);
dim3 grid(((DATA_SIZE) + block.x - 1) / block.x);
//
// Call Kernel (warm up; this launch is not timed)
Device_Kernel <<<grid, block>>> (dBuf, COEFF_A, COEFF_B);
CHECK(cudaDeviceSynchronize());
//
// Call Kernel (measure; synchronize so the full run is timed)
double iStart = CPU_Second();
Device_Kernel <<<grid, block>>> (dBuf, COEFF_A, COEFF_B);
CHECK(cudaDeviceSynchronize());
double iElaps = CPU_Second() - iStart;
//
// Display Result (first 10 elements only)
CHECK(cudaMemcpy(hBuf, dBuf, sizeof(float) * DATA_SIZE, cudaMemcpyDeviceToHost));
for (uint32_t i = 0; i < 10; i++)
{
printf("hBuf[%02d]=%8.4f\n", i, hBuf[i]);
}
printf("Time elapsed %lf sec (%lf GFLOPS)\n", iElaps, GFLOPS(iElaps));
//
// Finish
CHECK(cudaFree(dBuf));
if (hBuf) free(hBuf);
//
// Return from this Program
return(EXIT_SUCCESS);
}
//====================================================
// End of Program
//====================================================
|
1b46afac65497cfbba21750ef75810388bae3888.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "Rippling.h"
#include "Device.h"
using std::cout;
using std::endl;
__global__
extern void rippling(uchar4* ptrDevPixels, int w, int h, float t);
// Ctor: square image only (w == h). dt is the per-step time increment used
// by animationStep().
Rippling::Rippling(int w, int h, float dt, string title)
{
assert(w == h);
// Inputs
this->w = w;
this->h = h;
this->dt = dt;
// Tools
this->dg = dim3(64, 64, 1); // grid: 64 x 64 blocks (first launch arg in process())
this->db = dim3(16, 16, 1); // block: 16 x 16 threads (second launch arg in process())
this->t = 0;
// Outputs
this->title = title;
Device::assertDim(dg, db); // validate launch dimensions against the device
}
Rippling::~Rippling()
{
}
// Launches the rippling kernel over the pixel buffer at the current time t.
void Rippling::process(uchar4* ptrDevPixels, int w, int h)
{hipLaunchKernelGGL((
rippling), dim3(this->dg), dim3(this->db), 0, 0, ptrDevPixels, w, h, t);
}
// Advance the animation clock by one dt step.
void Rippling::animationStep()
{
t += dt;
}
// Current animation time (the kernel's t parameter).
float Rippling::getAnimationPara()
{
return t;
}
int Rippling::getW()
{
return w;
}
int Rippling::getH()
{
return h;
}
string Rippling::getTitle()
{
return title;
}
| 1b46afac65497cfbba21750ef75810388bae3888.cu | #include <iostream>
#include <assert.h>
#include "Rippling.h"
#include "Device.h"
using std::cout;
using std::endl;
__global__
extern void rippling(uchar4* ptrDevPixels, int w, int h, float t);
// Ctor: square image only (w == h). dt is the per-step time increment used
// by animationStep().
Rippling::Rippling(int w, int h, float dt, string title)
{
assert(w == h);
// Inputs
this->w = w;
this->h = h;
this->dt = dt;
// Tools
this->dg = dim3(64, 64, 1); // grid: 64 x 64 blocks (first launch arg in process())
this->db = dim3(16, 16, 1); // block: 16 x 16 threads (second launch arg in process())
this->t = 0;
// Outputs
this->title = title;
Device::assertDim(dg, db); // validate launch dimensions against the device
}
Rippling::~Rippling()
{
}
// Launches the rippling kernel over the pixel buffer at the current time t.
void Rippling::process(uchar4* ptrDevPixels, int w, int h)
{
rippling<<<this->dg, this->db>>>(ptrDevPixels, w, h, t);
}
// Advance the animation clock by one dt step.
void Rippling::animationStep()
{
t += dt;
}
// Current animation time (the kernel's t parameter).
float Rippling::getAnimationPara()
{
return t;
}
int Rippling::getW()
{
return w;
}
int Rippling::getH()
{
return h;
}
string Rippling::getTitle()
{
return title;
}
|
7d4931badfc6c21b08b60b37fd68baa88cd08c20.hip | // !!! This is a file automatically generated by hipify!!!
//header files included
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <string>
#include <sstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <ctime>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
//declaring the tile width and height
//for tile based matrix multiplication
#define TILE_WIDTH 64
#define TILE_HEIGHT 64
//Namespace for std
using namespace std;
//structure declaration for storing rows and columns for a matrix
// Binary .mtx file header: two unsigned ints (rows, then cols) followed by
// rows*cols floats in column-major order (see the reads/writes in main()).
struct matrix{
unsigned int rows; //storing rows of a matrix
unsigned int cols; //storing columns of a matrix
};
//handle error alias name declaration
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//global kernal for matrix multiplication, takes in input matrices and sizes, and multiplies them
//matrix multiplication is being done tile by tile
// Tiled GEMM: array3 = array1 (rows1 x cols1) * array2 (rows2 x cols2),
// all matrices stored column-major.
// PRECONDITION: blockDim.x == blockDim.y == TILE_WIDTH, otherwise parts of
// S1/S2 are read without ever being written.
// NOTE(review): main() launches with blockDim = sqrt(maxThreadsPerBlock)
// (32 on a 1024-thread device) while TILE_WIDTH is 64, so with that
// configuration the inner-product loop reads uninitialized shared memory —
// the tile size and launch configuration must be reconciled.
__global__ void matrix_mult(float* array1, unsigned int rows1, unsigned int cols1, float* array2, unsigned int rows2, unsigned int cols2, float* array3)
{
//shared memory takes one tile at a time
__shared__ float S1[TILE_WIDTH][TILE_HEIGHT]; //to store tiles for array 1
__shared__ float S2[TILE_HEIGHT][TILE_WIDTH]; //to store tiles for array 2
//threads x and y index for the current block
unsigned int tx=threadIdx.x;
unsigned int ty=threadIdx.y;
unsigned int c=blockIdx.x*blockDim.x + threadIdx.x; //output COLUMN index (bounded by cols2 below)
unsigned int r=blockIdx.y*blockDim.y + threadIdx.y; //output ROW index (bounded by rows1 below)
unsigned int idx=c*rows1+r; //column-major output index: r + c*rows1
float val=0; //register accumulator for the dot product
for(int m=0; m<1+((rows2-1)/TILE_WIDTH);m++) //walk the K dimension tile by tile
{
int var1=m*TILE_WIDTH+tx ; //K index this thread loads for S1
int var2=m*TILE_WIDTH+ty ; //K index this thread loads for S2
//copying a tile from array1
if (r < rows1 && var1 < rows2) //in-bounds element of array1
S1[ty][tx]=array1[r + var1*rows1];//column-major load from array1
else
S1[ty][tx]=0; //zero-pad the out-of-range tile entries
__syncthreads(); //tile S1 fully written before any thread reads it
//copying a tile from array2
if(c < cols2 && var2 < rows2) //in-bounds element of array2
S2[ty][tx]=array2[var2+rows2*c]; //column-major load from array2
else
S2[ty][tx]=0; //zero-pad the out-of-range tile entries
__syncthreads(); //tile S2 fully written before any thread reads it
for(int i=0; i<TILE_WIDTH;i++) //partial dot product: row ty of S1, column tx of S2
val+=S1[ty][i]*S2[i][tx]; //and multiplying elements
__syncthreads(); //tiles fully consumed before the next iteration overwrites them
}
if(r < rows1 && c< cols2) //guard threads outside the output matrix
array3[idx]=val; //saving multiplication result to global memory
}
int main(int argc, char* argv[])
{
if(argc != 4) //there should be four arguments, Usage: prog matrix1.mtx matrix2.mtx matrix3.mtx
return 1; //exit and return an error
ifstream infile_A, infile_B; //reading the input matrices
// *****************************************************************************
// Matrix A //
//******************************************************************************
infile_A.open(argv[1],ios::binary|ios::in|ios::ate);
//getting end and beginning of the file
infile_A.seekg(0,ios::end);
infile_A.seekg(0,ios::beg);
//memory allocation
matrix M_A;
infile_A.read(reinterpret_cast<char*>(&M_A),2*sizeof(unsigned int));
float* array_A=(float*)malloc(M_A.rows*M_A.cols*sizeof(float)); //column major
infile_A.read(reinterpret_cast<char*>(array_A),M_A.rows*M_A.cols);
infile_A.close();
// *****************************************************************************
// Matrix B //
//******************************************************************************
infile_B.open(argv[2],ios::binary|ios::in|ios::ate);
//getting end and beginning of the file
infile_B.seekg(0,ios::end);
infile_B.seekg(0,ios::beg);
//memory allocation
matrix M_B;
infile_B.read(reinterpret_cast<char*>(&M_B),2*sizeof(unsigned int));
float* array_B=(float*)malloc(M_B.rows*M_B.cols*sizeof(float)); //column major
infile_B.read(reinterpret_cast<char*>(array_B),M_B.rows*M_B.cols);
infile_B.close();
if(M_A.cols!=M_B.rows) //checking if the two matrices can be multiplied
{
cout<<"Illegal matrix sizes: "<<M_A.cols<<" != "<<M_B.rows<<endl;
return 1;
}
// *****************************************************************************
// allocate to the host //
//******************************************************************************
float* array_C=(float*)malloc(M_A.rows*M_B.cols*sizeof(float));//array to store gpu result in column major format
//GPU DEVICE PROPERTIES and selecting a GPU for calculation
int nDevices;
hipGetDeviceCount(&nDevices);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0); //using GPU0
//BLOCK AND GRID SIZE DECLARATION
float thread_block=sqrt(prop.maxThreadsPerBlock); //2D blocks used
dim3 DimGrid(ceil(M_B.cols/thread_block),ceil(M_A.rows/thread_block),1); //image saved as a 2D grid
dim3 DimBlock(thread_block,thread_block,1);
size_t Sbytes = 2* DimBlock.x * DimBlock.y ; //2 arrays used in the calculation, hence 2 * DimBlock.x * DimBlock.y
//Checking if sufficient shared memory available or not
if(prop.sharedMemPerBlock < Sbytes){
std::cout<<"ERROR: insufficient shared memory"<<std::endl;
exit(1);
}
// *****************************************************************************
// allocate to the GPU //
//******************************************************************************
float *array_A_gpu, *array_B_gpu, *array_C_gpu;//gpu arrays declared
hipMalloc(&array_A_gpu,M_A.rows*M_A.cols*sizeof(float)); //allocate space to store arrayA
hipMalloc(&array_B_gpu,M_B.rows*M_B.cols*sizeof(float)); //allocate space to store arrayB
hipMalloc(&array_C_gpu,M_A.rows*M_B.cols*sizeof(float)); //allocate space to store gpu result
//COPY TO GPU MEMORY
hipMemcpy(array_A_gpu, array_A, M_A.rows*M_A.cols*sizeof(float), hipMemcpyHostToDevice);//copy arrayA to gpu
hipMemcpy(array_B_gpu, array_B, M_B.rows*M_B.cols*sizeof(float), hipMemcpyHostToDevice);//copy arrayB to gpu
hipMemcpy(array_C_gpu, array_C, M_A.rows*M_B.cols*sizeof(float), hipMemcpyHostToDevice);//copy arrayC to gpu
// *****************************************************************************
// allocate to the GPU //
//******************************************************************************
//time measurement for matrix multiplication
hipEvent_t start1, stop1;
hipEventCreate(&start1);
hipEventCreate(&stop1);
//MATRIX MULTIPLICATION USING KERNEL
hipEventRecord(start1);
hipLaunchKernelGGL(( matrix_mult), dim3(DimGrid), dim3(DimBlock), Sbytes, 0, array_A_gpu,M_A.rows,M_A.cols,array_B_gpu,M_B.rows,M_B.cols,array_C_gpu);//calling the kernel
hipEventRecord(stop1);
hipEventSynchronize(stop1);
float milliseconds1 = 0;//storing the execution time in milliseconds
hipEventElapsedTime(&milliseconds1, start1, stop1);//get the time in milliseconds
float msecPerMatrixMul = milliseconds1;
double flopsPerMatrixMul = 2.0 * (double) M_A.rows *(double) M_B.cols *(double) M_A.cols;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("Performance= %.2f GFlop/s, Time= %.3f msec",gigaFlops,msecPerMatrixMul);
//copy to CPU MEMORY
hipMemcpy(array_C, array_C_gpu, M_A.rows*M_B.cols*sizeof(float), hipMemcpyDeviceToHost);//copying result of multiplication from gpu to cpu
// *****************************************************************************
// Saving the result //
//******************************************************************************
//SAVING THE OUTPUT MATRIX
ofstream ofile(argv[3], ios::binary);
ofile.write((char*) &M_A.rows, sizeof(unsigned int));//writing the rows
ofile.write((char*) &M_B.cols, sizeof(unsigned int));//writing the cols
ofile.write((char*) array_C , M_A.rows*M_B.cols*sizeof(float));//writing all elements
return 0;
}
| 7d4931badfc6c21b08b60b37fd68baa88cd08c20.cu | //header files included
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <string>
#include <sstream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <ctime>
#include <vector>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
//declaring the tile width and height
//for tile based matrix multiplication
#define TILE_WIDTH 64
#define TILE_HEIGHT 64
//Namespace for std
using namespace std;
//structure declaration for storing rows and columns for a matrix
// Binary .mtx file header: two unsigned ints (rows, then cols) followed by
// rows*cols floats in column-major order (see the reads/writes in main()).
struct matrix{
unsigned int rows; //storing rows of a matrix
unsigned int cols; //storing columns of a matrix
};
//handle error alias name declaration
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//global kernal for matrix multiplication, takes in input matrices and sizes, and multiplies them
//matrix multiplication is being done tile by tile
// Tiled GEMM: array3 = array1 (rows1 x cols1) * array2 (rows2 x cols2),
// all matrices stored column-major.
// PRECONDITION: blockDim.x == blockDim.y == TILE_WIDTH, otherwise parts of
// S1/S2 are read without ever being written.
// NOTE(review): main() launches with blockDim = sqrt(maxThreadsPerBlock)
// (32 on a 1024-thread device) while TILE_WIDTH is 64, so with that
// configuration the inner-product loop reads uninitialized shared memory —
// the tile size and launch configuration must be reconciled.
__global__ void matrix_mult(float* array1, unsigned int rows1, unsigned int cols1, float* array2, unsigned int rows2, unsigned int cols2, float* array3)
{
//shared memory takes one tile at a time
__shared__ float S1[TILE_WIDTH][TILE_HEIGHT]; //to store tiles for array 1
__shared__ float S2[TILE_HEIGHT][TILE_WIDTH]; //to store tiles for array 2
//threads x and y index for the current block
unsigned int tx=threadIdx.x;
unsigned int ty=threadIdx.y;
unsigned int c=blockIdx.x*blockDim.x + threadIdx.x; //output COLUMN index (bounded by cols2 below)
unsigned int r=blockIdx.y*blockDim.y + threadIdx.y; //output ROW index (bounded by rows1 below)
unsigned int idx=c*rows1+r; //column-major output index: r + c*rows1
float val=0; //register accumulator for the dot product
for(int m=0; m<1+((rows2-1)/TILE_WIDTH);m++) //walk the K dimension tile by tile
{
int var1=m*TILE_WIDTH+tx ; //K index this thread loads for S1
int var2=m*TILE_WIDTH+ty ; //K index this thread loads for S2
//copying a tile from array1
if (r < rows1 && var1 < rows2) //in-bounds element of array1
S1[ty][tx]=array1[r + var1*rows1];//column-major load from array1
else
S1[ty][tx]=0; //zero-pad the out-of-range tile entries
__syncthreads(); //tile S1 fully written before any thread reads it
//copying a tile from array2
if(c < cols2 && var2 < rows2) //in-bounds element of array2
S2[ty][tx]=array2[var2+rows2*c]; //column-major load from array2
else
S2[ty][tx]=0; //zero-pad the out-of-range tile entries
__syncthreads(); //tile S2 fully written before any thread reads it
for(int i=0; i<TILE_WIDTH;i++) //partial dot product: row ty of S1, column tx of S2
val+=S1[ty][i]*S2[i][tx]; //and multiplying elements
__syncthreads(); //tiles fully consumed before the next iteration overwrites them
}
if(r < rows1 && c< cols2) //guard threads outside the output matrix
array3[idx]=val; //saving multiplication result to global memory
}
// Entry point.
// Usage: prog matrix1.mtx matrix2.mtx matrix3.mtx
// Reads two binary matrices, multiplies them on GPU 0 with the tiled
// matrix_mult kernel and writes the product (M_A.rows x M_B.cols) to the
// output file. File layout: rows (unsigned int), cols (unsigned int), then
// rows*cols floats in column-major order.
// Returns 0 on success, 1 on bad usage or incompatible matrix sizes.
int main(int argc, char* argv[])
{
    if (argc != 4) // exactly three file arguments expected
        return 1;
    ifstream infile_A, infile_B;
    // *****************************************************************************
    // Matrix A //
    //******************************************************************************
    infile_A.open(argv[1], ios::binary | ios::in | ios::ate);
    infile_A.seekg(0, ios::beg);
    matrix M_A;
    infile_A.read(reinterpret_cast<char*>(&M_A), 2 * sizeof(unsigned int));
    float* array_A = (float*)malloc(M_A.rows * M_A.cols * sizeof(float)); // column major
    // BUG FIX: the payload is rows*cols FLOATS; the original read only
    // rows*cols BYTES, leaving three quarters of the matrix uninitialized.
    infile_A.read(reinterpret_cast<char*>(array_A), M_A.rows * M_A.cols * sizeof(float));
    infile_A.close();
    // *****************************************************************************
    // Matrix B //
    //******************************************************************************
    infile_B.open(argv[2], ios::binary | ios::in | ios::ate);
    infile_B.seekg(0, ios::beg);
    matrix M_B;
    infile_B.read(reinterpret_cast<char*>(&M_B), 2 * sizeof(unsigned int));
    float* array_B = (float*)malloc(M_B.rows * M_B.cols * sizeof(float)); // column major
    // BUG FIX: same byte-count error as for matrix A.
    infile_B.read(reinterpret_cast<char*>(array_B), M_B.rows * M_B.cols * sizeof(float));
    infile_B.close();
    if (M_A.cols != M_B.rows) // inner dimensions must agree for A*B
    {
        cout << "Illegal matrix sizes: " << M_A.cols << " != " << M_B.rows << endl;
        free(array_A);
        free(array_B);
        return 1;
    }
    // *****************************************************************************
    // allocate to the host //
    //******************************************************************************
    float* array_C = (float*)malloc(M_A.rows * M_B.cols * sizeof(float)); // gpu result, column major
    // GPU device properties; GPU 0 is used for the calculation
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // BLOCK AND GRID SIZE DECLARATION: square 2D blocks sized to the device limit
    float thread_block = sqrt(prop.maxThreadsPerBlock);
    dim3 DimGrid(ceil(M_B.cols / thread_block), ceil(M_A.rows / thread_block), 1);
    dim3 DimBlock(thread_block, thread_block, 1);
    // BUG FIX: the two shared-memory tiles hold floats, so the dynamic
    // shared-memory request (and the capacity check below) must be expressed
    // in bytes; the original omitted sizeof(float).
    size_t Sbytes = 2 * DimBlock.x * DimBlock.y * sizeof(float);
    if (prop.sharedMemPerBlock < Sbytes) {
        std::cout << "ERROR: insufficient shared memory" << std::endl;
        exit(1);
    }
    // *****************************************************************************
    // allocate to the GPU //
    //******************************************************************************
    float *array_A_gpu, *array_B_gpu, *array_C_gpu;
    cudaMalloc(&array_A_gpu, M_A.rows * M_A.cols * sizeof(float));
    cudaMalloc(&array_B_gpu, M_B.rows * M_B.cols * sizeof(float));
    cudaMalloc(&array_C_gpu, M_A.rows * M_B.cols * sizeof(float));
    // COPY TO GPU MEMORY (the result buffer does not need to be uploaded: the
    // kernel writes every element of C that is read back below)
    cudaMemcpy(array_A_gpu, array_A, M_A.rows * M_A.cols * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(array_B_gpu, array_B, M_B.rows * M_B.cols * sizeof(float), cudaMemcpyHostToDevice);
    // time measurement for the matrix multiplication
    cudaEvent_t start1, stop1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    // MATRIX MULTIPLICATION USING KERNEL
    cudaEventRecord(start1);
    matrix_mult<<<DimGrid, DimBlock, Sbytes>>>(array_A_gpu, M_A.rows, M_A.cols, array_B_gpu, M_B.rows, M_B.cols, array_C_gpu);
    cudaEventRecord(stop1);
    cudaEventSynchronize(stop1);
    float milliseconds1 = 0; // execution time in milliseconds
    cudaEventElapsedTime(&milliseconds1, start1, stop1);
    float msecPerMatrixMul = milliseconds1;
    double flopsPerMatrixMul = 2.0 * (double)M_A.rows * (double)M_B.cols * (double)M_A.cols;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf("Performance= %.2f GFlop/s, Time= %.3f msec", gigaFlops, msecPerMatrixMul);
    // copy the multiplication result back to host memory
    cudaMemcpy(array_C, array_C_gpu, M_A.rows * M_B.cols * sizeof(float), cudaMemcpyDeviceToHost);
    // *****************************************************************************
    // Saving the result //
    //******************************************************************************
    ofstream ofile(argv[3], ios::binary);
    ofile.write((char*)&M_A.rows, sizeof(unsigned int)); // writing the rows
    ofile.write((char*)&M_B.cols, sizeof(unsigned int)); // writing the cols
    ofile.write((char*)array_C, M_A.rows * M_B.cols * sizeof(float)); // all elements
    ofile.close();
    // BUG FIX: release events and all host/device buffers (the original
    // leaked every allocation).
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    cudaFree(array_A_gpu);
    cudaFree(array_B_gpu);
    cudaFree(array_C_gpu);
    free(array_A);
    free(array_B);
    free(array_C);
    return 0;
}
|
d75f721991f9e86ac0721fd44db09eac0b8a5dee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cuda functions to execute collision detection
// and synchrozie data between the detector class
// and the gpu hosted memory
// reference: https://zhuanlan.zhihu.com/p/34587739
#include "collide.h"
#include <iostream>
#include <cstdio>
#include <glm/glm.hpp>
using namespace std;
using namespace glm;
// define host data
glm::vec3 pos[MAX_BALL_COUNT], velocity[MAX_BALL_COUNT];
float mass[MAX_BALL_COUNT], radius[MAX_BALL_COUNT], cor[MAX_BALL_COUNT];
int b1[MAX_COLLISIONS], b2[MAX_COLLISIONS];
int b[MAX_COLLISIONS], p[MAX_COLLISIONS];
// define device data
__device__ glm::vec3 _pos[MAX_BALL_COUNT], _velocity[MAX_BALL_COUNT];
__device__ float _mass[MAX_BALL_COUNT], _radius[MAX_BALL_COUNT], _cor[MAX_BALL_COUNT];
__device__ int _b1[MAX_COLLISIONS], _b2[MAX_COLLISIONS];
__device__ int _b[MAX_COLLISIONS], _p[MAX_COLLISIONS];
// sychronize data between device and host
// Copy the first n entries of the device symbol _velocity back into the host
// velocity[] array (device -> host). Callers must keep n <= MAX_BALL_COUNT,
// the declared size of both arrays.
void reverseSyncVelocity(int n) {
hipMemcpyFromSymbol(velocity, _velocity, n * sizeof(vec3));
}
// Device-side debug helper: print a vec3 as "x y z" on one line.
__device__ void printv(vec3 val) {
printf("%f %f %f\n", val.x, val.y, val.z);
}
// Device-side debug helper: print a single float on its own line.
__device__ void printfl(float val) {
printf("%f\n", val);
}
// Upload the first n host positions and velocities to the matching device
// symbols (host -> device).
void syncVars(int n) {
hipMemcpyToSymbol(_pos, pos, n * sizeof(vec3), 0);
hipMemcpyToSymbol(_velocity, velocity, n * sizeof(vec3), 0);
}
// Upload the first n host mass, radius and cor values to the matching device
// symbols (host -> device).
void syncConsts(int n) {
hipMemcpyToSymbol(_mass, mass, n * sizeof(float), 0);
hipMemcpyToSymbol(_radius, radius, n * sizeof(float), 0);
hipMemcpyToSymbol(_cor, cor, n * sizeof(float), 0);
}
// Upload the first n staged ball-ball pair indices (b1, b2) to the device.
// NOTE(review): n is not clamped here; callers must keep n <= MAX_COLLISIONS,
// the declared size of the host arrays and device symbols.
void syncBallPairs(int n) {
hipMemcpyToSymbol(_b1, b1, n * sizeof(int), 0);
hipMemcpyToSymbol(_b2, b2, n * sizeof(int), 0);
}
// Upload the first n staged ball-plane pair indices (b, p) to the device.
// NOTE(review): n is not clamped here; callers must keep n <= MAX_COLLISIONS.
void syncBallPlanePairs(int n) {
hipMemcpyToSymbol(_b, b, n * sizeof(int), 0);
hipMemcpyToSymbol(_p, p, n * sizeof(int), 0);
}
// synchronization between cuda and detector class
// Mirror the full state of the first n balls into the host staging arrays,
// then upload both the constant data (mass/radius/cor) and the variable data
// (pos/velocity) to the device.
void initBallCuda(vector<Ball*> balls, int n) {
    for (int i = 0; i < n; ++i) {
        const Ball* ball = balls[i];
        pos[i] = ball->pos;
        velocity[i] = ball->velocity;
        mass[i] = ball->mass;
        cor[i] = ball->cor;
        radius[i] = ball->radius;
    }
    syncConsts(n);
    syncVars(n);
}
// Refresh only the varying quantities (position and velocity) of the first
// n balls on the device.
void copyBallVarCuda(vector<Ball*> balls, int n) {
    for (int i = 0; i < n; ++i) {
        const Ball* ball = balls[i];
        pos[i] = ball->pos;
        velocity[i] = ball->velocity;
    }
    syncVars(n);
}
// Pull the post-collision velocities back from the device and write them
// into the first n Ball objects.
void updateVelocityCuda(vector<Ball*> balls, int n) {
    reverseSyncVelocity(n); // device _velocity -> host velocity[]
    for (int i = 0; i < n; ++i)
        balls[i]->velocity = velocity[i];
}
// Stage up to MAX_COLLISIONS ball-ball pairs into the host b1/b2 arrays and
// copy them to the device symbols. Pairs beyond the capacity are dropped.
void copyBallPairCuda(vector<BallPair> pairs, int numPairs) {
    // BUG FIX: the fill loop was clamped to MAX_COLLISIONS but the copy below
    // still used the unclamped count, reading past b1/b2 and writing past the
    // device symbols whenever numPairs > MAX_COLLISIONS.
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS;
    for (int i = 0; i < numPairs; i++) {
        b1[i] = pairs[i].b1;
        b2[i] = pairs[i].b2;
    }
    syncBallPairs(numPairs);
}
// Stage up to MAX_COLLISIONS ball-plane pairs into the host b/p arrays and
// copy them to the device symbols. Pairs beyond the capacity are dropped.
void copyBallPlanePairCuda(vector<BallPlanePair> pairs, int numPairs) {
    // BUG FIX: same capacity bug as copyBallPairCuda — the copy used the
    // unclamped count, overflowing host arrays and device symbols whenever
    // numPairs > MAX_COLLISIONS.
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS;
    for (int i = 0; i < numPairs; i++) {
        b[i] = pairs[i].b;
        p[i] = pairs[i].p;
    }
    syncBallPlanePairs(numPairs);
}
// utility function to compute the plane normal vector
// Map a wall-plane identifier (LEFT/RIGHT/BACK/FRONT/TOP/BOTTOM) to its
// axis-aligned direction vector; an unrecognized id yields the zero vector.
__device__
vec3 _planeDir(int p) {
    switch (p) {
    case LEFT:   return vec3(-1.0f, 0.0f, 0.0f);
    case RIGHT:  return vec3(1.0f, 0.0f, 0.0f);
    case BACK:   return vec3(0.0f, 0.0f, -1.0f);
    case FRONT:  return vec3(0.0f, 0.0f, 1.0f);
    case TOP:    return vec3(0.0f, 1.0f, 0.0f);
    case BOTTOM: return vec3(0.0f, -1.0f, 0.0f);
    default:     return vec3(0.0f, 0.0f, 0.0f);
    }
}
// kernel functions
// Grid-stride kernel: for each staged candidate pair (_b1[i], _b2[i]), test
// whether the two balls overlap (center distance < sum of radii) while
// approaching each other, and if so apply an impulse-based velocity change
// along the line of centers using the smaller coefficient of restitution.
// Launch: 1D grid of 1D blocks; numPairs entries must already be staged via
// copyBallPairCuda.
// NOTE(review): two pairs sharing a ball would both read-modify-write that
// ball's _velocity without atomics; assumes the detector never emits such
// pairs in the same launch — confirm.
__global__
void ballCollideKernel(int numPairs) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numPairs; i += stride) {
int index1 = _b1[i];
int index2 = _b2[i];
vec3 pos1 = _pos[index1];
vec3 pos2 = _pos[index2];
float r = _radius[index1] + _radius[index2];
vec3 dp = pos1 - pos2;
vec3 v1 = _velocity[index1];
vec3 v2 = _velocity[index2];
vec3 dv = v1 - v2;
// overlap test (squared distances avoid a sqrt) and approach test
if (dot(dp, dp) < r * r && dot(dv, dp) <= 0) {
// balls are close enough and are approaching
// so the collision will happen
float cor1 = _cor[index1];
float cor2 = _cor[index2];
float c = min(cor1, cor2);
float m1 = _mass[index1];
float m2 = _mass[index2];
// use momentum & energy preservation theorem
// to solve the velocities
vec3 dpvec = normalize(dp);
// project each velocity onto the line of centers; only that component
// changes in the collision
vec3 proj1 = dot(v1, dpvec) * dpvec;
vec3 proj2 = dot(v2, dpvec) * dpvec;
vec3 dv1 = ((1 + c) * m2 * (proj2 - proj1)) / (m1 + m2);
vec3 dv2 = ((1 + c) * m1 * (proj1 - proj2)) / (m1 + m2);
_velocity[index1] += dv1;
_velocity[index2] += dv2;
}
}
}
// Grid-stride kernel: for each staged ball-plane pair (_b[i], _p[i]), test
// whether the ball reaches past the wall at distance SIZE / 2 along the
// plane direction while moving toward it, and if so reflect the normal
// velocity component scaled by (1 + cor).
// NOTE(review): if SIZE is an integer macro, SIZE / 2 truncates — confirm
// that is intended.
__global__
void ballPlaneCollideKernel(int numPairs) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numPairs; i += stride) {
int ballIndex = _b[i];
int planeIndex = _p[i];
vec3 dir = _planeDir(planeIndex);
vec3 p = _pos[ballIndex];
vec3 v = _velocity[ballIndex];
float r = _radius[ballIndex];
if (dot(p, dir) + r > SIZE / 2 && dot(v, dir) > 0) {
// the ball and the plane are close enough and the ball is approaching
// so the collision will happen
float c = _cor[ballIndex];
// assume the plane is of infinity mass
vec3 dv = (1 + c) * dir * dot(v, dir);
_velocity[ballIndex] -= dv;
}
}
}
// interfaces to the detector
// Host interface: stage the candidate ball-ball pairs on the device and
// launch the collision kernel. Pairs beyond MAX_COLLISIONS do not fit the
// device buffers and are dropped.
void ballCollideCuda(vector<BallPair>& pairs, vector<Ball*> balls) {
    (void)balls; // kept for interface compatibility
    int numPairs = (int)pairs.size();
    // BUG FIX: without this clamp the kernel indexed _b1/_b2 beyond
    // MAX_COLLISIONS whenever more pairs were supplied.
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS;
    if (numPairs == 0) return; // nothing to stage or launch
    copyBallPairCuda(pairs, numPairs);
    dim3 blockSize(64);
    // size the grid by the kernel's loop bound (numPairs); the original used
    // balls.size(), under-provisioning threads when there are more pairs than
    // balls (the grid-stride loop kept it correct but slow)
    dim3 gridSize((numPairs + blockSize.x - 1) / blockSize.x);
    // call kernel function
    hipLaunchKernelGGL(( ballCollideKernel) , dim3(gridSize), dim3(blockSize), 0, 0, numPairs);
}
// Host interface: stage the candidate ball-plane pairs on the device and
// launch the wall-collision kernel, reporting any launch failure.
void ballPlaneCollideCuda(vector<BallPlanePair>& pairs, vector<Ball*> balls) {
    (void)balls; // kept for interface compatibility
    int numPairs = (int)pairs.size();
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS; // device capacity
    if (numPairs == 0) return; // nothing to stage or launch
    copyBallPlanePairCuda(pairs, numPairs);
    dim3 blockSize(64);
    dim3 gridSize((numPairs + blockSize.x - 1) / blockSize.x); // cover every pair
    // call kernel function
    hipLaunchKernelGGL(( ballPlaneCollideKernel) , dim3(gridSize), dim3(blockSize), 0, 0, numPairs);
    // BUG FIX: the original stored hipGetLastError() into an unused local,
    // silently discarding launch failures.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        fprintf(stderr, "ballPlaneCollideKernel launch failed: %s\n", hipGetErrorString(err));
}
| d75f721991f9e86ac0721fd44db09eac0b8a5dee.cu | // cuda functions to execute collision detection
// and synchrozie data between the detector class
// and the gpu hosted memory
// reference: https://zhuanlan.zhihu.com/p/34587739
#include "collide.h"
#include <iostream>
#include <cstdio>
#include <glm/glm.hpp>
using namespace std;
using namespace glm;
// define host data
glm::vec3 pos[MAX_BALL_COUNT], velocity[MAX_BALL_COUNT];
float mass[MAX_BALL_COUNT], radius[MAX_BALL_COUNT], cor[MAX_BALL_COUNT];
int b1[MAX_COLLISIONS], b2[MAX_COLLISIONS];
int b[MAX_COLLISIONS], p[MAX_COLLISIONS];
// define device data
__device__ glm::vec3 _pos[MAX_BALL_COUNT], _velocity[MAX_BALL_COUNT];
__device__ float _mass[MAX_BALL_COUNT], _radius[MAX_BALL_COUNT], _cor[MAX_BALL_COUNT];
__device__ int _b1[MAX_COLLISIONS], _b2[MAX_COLLISIONS];
__device__ int _b[MAX_COLLISIONS], _p[MAX_COLLISIONS];
// sychronize data between device and host
// Copy the first n entries of the device symbol _velocity back into the host
// velocity[] array (device -> host). Callers must keep n <= MAX_BALL_COUNT,
// the declared size of both arrays.
void reverseSyncVelocity(int n) {
cudaMemcpyFromSymbol(velocity, _velocity, n * sizeof(vec3));
}
// Device-side debug helper: print a vec3 as "x y z" on one line.
__device__ void printv(vec3 val) {
printf("%f %f %f\n", val.x, val.y, val.z);
}
// Device-side debug helper: print a single float on its own line.
__device__ void printfl(float val) {
printf("%f\n", val);
}
// Upload the first n host positions and velocities to the matching device
// symbols (host -> device).
void syncVars(int n) {
cudaMemcpyToSymbol(_pos, pos, n * sizeof(vec3), 0);
cudaMemcpyToSymbol(_velocity, velocity, n * sizeof(vec3), 0);
}
// Upload the first n host mass, radius and cor values to the matching device
// symbols (host -> device).
void syncConsts(int n) {
cudaMemcpyToSymbol(_mass, mass, n * sizeof(float), 0);
cudaMemcpyToSymbol(_radius, radius, n * sizeof(float), 0);
cudaMemcpyToSymbol(_cor, cor, n * sizeof(float), 0);
}
// Upload the first n staged ball-ball pair indices (b1, b2) to the device.
// NOTE(review): n is not clamped here; callers must keep n <= MAX_COLLISIONS,
// the declared size of the host arrays and device symbols.
void syncBallPairs(int n) {
cudaMemcpyToSymbol(_b1, b1, n * sizeof(int), 0);
cudaMemcpyToSymbol(_b2, b2, n * sizeof(int), 0);
}
// Upload the first n staged ball-plane pair indices (b, p) to the device.
// NOTE(review): n is not clamped here; callers must keep n <= MAX_COLLISIONS.
void syncBallPlanePairs(int n) {
cudaMemcpyToSymbol(_b, b, n * sizeof(int), 0);
cudaMemcpyToSymbol(_p, p, n * sizeof(int), 0);
}
// synchronization between cuda and detector class
// Mirror the full state of the first n balls into the host staging arrays,
// then upload both the constant data (mass/radius/cor) and the variable data
// (pos/velocity) to the device.
void initBallCuda(vector<Ball*> balls, int n) {
    for (int i = 0; i < n; ++i) {
        const Ball* ball = balls[i];
        pos[i] = ball->pos;
        velocity[i] = ball->velocity;
        mass[i] = ball->mass;
        cor[i] = ball->cor;
        radius[i] = ball->radius;
    }
    syncConsts(n);
    syncVars(n);
}
// Refresh only the varying quantities (position and velocity) of the first
// n balls on the device.
void copyBallVarCuda(vector<Ball*> balls, int n) {
    for (int i = 0; i < n; ++i) {
        const Ball* ball = balls[i];
        pos[i] = ball->pos;
        velocity[i] = ball->velocity;
    }
    syncVars(n);
}
// Pull the post-collision velocities back from the device and write them
// into the first n Ball objects.
void updateVelocityCuda(vector<Ball*> balls, int n) {
    reverseSyncVelocity(n); // device _velocity -> host velocity[]
    for (int i = 0; i < n; ++i)
        balls[i]->velocity = velocity[i];
}
// Stage up to MAX_COLLISIONS ball-ball pairs into the host b1/b2 arrays and
// copy them to the device symbols. Pairs beyond the capacity are dropped.
void copyBallPairCuda(vector<BallPair> pairs, int numPairs) {
    // BUG FIX: the fill loop was clamped to MAX_COLLISIONS but the copy below
    // still used the unclamped count, reading past b1/b2 and writing past the
    // device symbols whenever numPairs > MAX_COLLISIONS.
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS;
    for (int i = 0; i < numPairs; i++) {
        b1[i] = pairs[i].b1;
        b2[i] = pairs[i].b2;
    }
    syncBallPairs(numPairs);
}
// Stage up to MAX_COLLISIONS ball-plane pairs into the host b/p arrays and
// copy them to the device symbols. Pairs beyond the capacity are dropped.
void copyBallPlanePairCuda(vector<BallPlanePair> pairs, int numPairs) {
    // BUG FIX: same capacity bug as copyBallPairCuda — the copy used the
    // unclamped count, overflowing host arrays and device symbols whenever
    // numPairs > MAX_COLLISIONS.
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS;
    for (int i = 0; i < numPairs; i++) {
        b[i] = pairs[i].b;
        p[i] = pairs[i].p;
    }
    syncBallPlanePairs(numPairs);
}
// utility function to compute the plane normal vector
// Map a wall-plane identifier (LEFT/RIGHT/BACK/FRONT/TOP/BOTTOM) to its
// axis-aligned direction vector; an unrecognized id yields the zero vector.
__device__
vec3 _planeDir(int p) {
    switch (p) {
    case LEFT:   return vec3(-1.0f, 0.0f, 0.0f);
    case RIGHT:  return vec3(1.0f, 0.0f, 0.0f);
    case BACK:   return vec3(0.0f, 0.0f, -1.0f);
    case FRONT:  return vec3(0.0f, 0.0f, 1.0f);
    case TOP:    return vec3(0.0f, 1.0f, 0.0f);
    case BOTTOM: return vec3(0.0f, -1.0f, 0.0f);
    default:     return vec3(0.0f, 0.0f, 0.0f);
    }
}
// kernel functions
// Grid-stride kernel: for each staged candidate pair (_b1[i], _b2[i]), test
// whether the two balls overlap (center distance < sum of radii) while
// approaching each other, and if so apply an impulse-based velocity change
// along the line of centers using the smaller coefficient of restitution.
// Launch: 1D grid of 1D blocks; numPairs entries must already be staged via
// copyBallPairCuda.
// NOTE(review): two pairs sharing a ball would both read-modify-write that
// ball's _velocity without atomics; assumes the detector never emits such
// pairs in the same launch — confirm.
__global__
void ballCollideKernel(int numPairs) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numPairs; i += stride) {
int index1 = _b1[i];
int index2 = _b2[i];
vec3 pos1 = _pos[index1];
vec3 pos2 = _pos[index2];
float r = _radius[index1] + _radius[index2];
vec3 dp = pos1 - pos2;
vec3 v1 = _velocity[index1];
vec3 v2 = _velocity[index2];
vec3 dv = v1 - v2;
// overlap test (squared distances avoid a sqrt) and approach test
if (dot(dp, dp) < r * r && dot(dv, dp) <= 0) {
// balls are close enough and are approaching
// so the collision will happen
float cor1 = _cor[index1];
float cor2 = _cor[index2];
float c = min(cor1, cor2);
float m1 = _mass[index1];
float m2 = _mass[index2];
// use momentum & energy preservation theorem
// to solve the velocities
vec3 dpvec = normalize(dp);
// project each velocity onto the line of centers; only that component
// changes in the collision
vec3 proj1 = dot(v1, dpvec) * dpvec;
vec3 proj2 = dot(v2, dpvec) * dpvec;
vec3 dv1 = ((1 + c) * m2 * (proj2 - proj1)) / (m1 + m2);
vec3 dv2 = ((1 + c) * m1 * (proj1 - proj2)) / (m1 + m2);
_velocity[index1] += dv1;
_velocity[index2] += dv2;
}
}
}
// Grid-stride kernel: for each staged ball-plane pair (_b[i], _p[i]), test
// whether the ball reaches past the wall at distance SIZE / 2 along the
// plane direction while moving toward it, and if so reflect the normal
// velocity component scaled by (1 + cor).
// NOTE(review): if SIZE is an integer macro, SIZE / 2 truncates — confirm
// that is intended.
__global__
void ballPlaneCollideKernel(int numPairs) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numPairs; i += stride) {
int ballIndex = _b[i];
int planeIndex = _p[i];
vec3 dir = _planeDir(planeIndex);
vec3 p = _pos[ballIndex];
vec3 v = _velocity[ballIndex];
float r = _radius[ballIndex];
if (dot(p, dir) + r > SIZE / 2 && dot(v, dir) > 0) {
// the ball and the plane are close enough and the ball is approaching
// so the collision will happen
float c = _cor[ballIndex];
// assume the plane is of infinity mass
vec3 dv = (1 + c) * dir * dot(v, dir);
_velocity[ballIndex] -= dv;
}
}
}
// interfaces to the detector
// Host interface: stage the candidate ball-ball pairs on the device and
// launch the collision kernel. Pairs beyond MAX_COLLISIONS do not fit the
// device buffers and are dropped.
void ballCollideCuda(vector<BallPair>& pairs, vector<Ball*> balls) {
    (void)balls; // kept for interface compatibility
    int numPairs = (int)pairs.size();
    // BUG FIX: without this clamp the kernel indexed _b1/_b2 beyond
    // MAX_COLLISIONS whenever more pairs were supplied.
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS;
    if (numPairs == 0) return; // nothing to stage or launch
    copyBallPairCuda(pairs, numPairs);
    dim3 blockSize(64);
    // size the grid by the kernel's loop bound (numPairs); the original used
    // balls.size(), under-provisioning threads when there are more pairs than
    // balls (the grid-stride loop kept it correct but slow)
    dim3 gridSize((numPairs + blockSize.x - 1) / blockSize.x);
    // call kernel function
    ballCollideKernel <<<gridSize, blockSize>>> (numPairs);
}
// Host interface: stage the candidate ball-plane pairs on the device and
// launch the wall-collision kernel, reporting any launch failure.
void ballPlaneCollideCuda(vector<BallPlanePair>& pairs, vector<Ball*> balls) {
    (void)balls; // kept for interface compatibility
    int numPairs = (int)pairs.size();
    if (numPairs > MAX_COLLISIONS) numPairs = MAX_COLLISIONS; // device capacity
    if (numPairs == 0) return; // nothing to stage or launch
    copyBallPlanePairCuda(pairs, numPairs);
    dim3 blockSize(64);
    dim3 gridSize((numPairs + blockSize.x - 1) / blockSize.x); // cover every pair
    // call kernel function
    ballPlaneCollideKernel <<<gridSize, blockSize>>> (numPairs);
    // BUG FIX: the original stored cudaGetLastError() into an unused local,
    // silently discarding launch failures.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "ballPlaneCollideKernel launch failed: %s\n", cudaGetErrorString(err));
}
|
374a41cac4aa00dd3f262f841370bf9d3419b1b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates how HyperQ allows supporting devices to avoid false
// dependencies between kernels in different streams.
//
// - Devices without HyperQ will run a maximum of two kernels at a time (one
// kernel_A and one kernel_B).
// - Devices with HyperQ will run up to 32 kernels simultaneously.
#include <stdio.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_functions.h>
#include <helper_cuda.h>
const char *sSDKsample = "hyperQ";
// This subroutine does no real work but runs for at least the specified number
// of clock ticks.
// Spin on the device until at least clock_count clock ticks have elapsed,
// then store the observed tick count in d_o[0]. The unsigned subtraction
// below keeps the elapsed-time computation correct across clock() wrap-around.
__device__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t) (end_clock - start_clock);
}
d_o[0] = clock_offset;
}
// We create two identical kernels calling clock_block(), we create two so that
// we can identify dependencies in the profile timeline ("kernel_B" is always
// dependent on "kernel_A" in the same stream).
// Thin __global__ wrapper around clock_block(); identical to kernel_B so the
// two launches per stream are distinguishable in a profiler timeline.
__global__ void kernel_A(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
// Second identical wrapper around clock_block(); queued after kernel_A in
// each stream so the in-stream dependency is visible when profiling.
__global__ void kernel_B(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
// Single-warp reduction kernel (note: this is not optimized for simplicity)
// Single-block reduction: sums d_clocks[0..N) and writes the total back to
// d_clocks[0]. Expects blockDim.x == 32 (s_clocks has 32 slots and the
// reduction loop starts at warpSize / 2); main() launches it as sum<<<1,32>>>.
__global__ void sum(clock_t *d_clocks, int N)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
// each thread accumulates every blockDim.x-th element into a private sum
for (int i = threadIdx.x ; i < N ; i += blockDim.x)
{
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
cg::sync(cta);
// pairwise halving reduction of the per-thread sums in shared memory
for (int i = warpSize / 2 ; i > 0 ; i /= 2)
{
if (threadIdx.x < i)
{
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
cg::sync(cta);
}
if (threadIdx.x == 0)
{
d_clocks[0] = s_clocks[0];
}
}
// Sample driver: queues a {kernel_A, kernel_B} pair into each of nstreams
// streams, times the whole batch with events, and checks the summed device
// clocks against the expected total. On HyperQ-capable hardware the streams
// run concurrently, so the measured time approaches 2 * kernel_time.
// Exits EXIT_SUCCESS when the clock total meets expectations.
int main(int argc, char **argv)
{
int nstreams = 32; // One stream for each pair of kernels
float kernel_time = 10; // Time each kernel should run in ms
float elapsed_time;
int cuda_device = 0;
printf("starting %s...\n", sSDKsample);
// Get number of streams (if overridden on the command line)
// NOTE(review): the value is not validated; a non-positive nstreams would
// break the allocations below — confirm acceptable for sample code.
if (checkCmdLineFlag(argc, (const char **)argv, "nstreams"))
{
nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "nstreams");
}
// Use command-line specified CUDA device, otherwise use device with
// highest Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
// Get device properties
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDevice(&cuda_device));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
// HyperQ is available in devices of Compute Capability 3.5 and higher
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
printf(" CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf(" CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// Allocate host memory for the output (reduced to a single value)
clock_t *a = 0;
checkCudaErrors(hipHostMalloc((void **)&a, sizeof(clock_t)));
// Allocate device memory for the output (one value for each kernel)
clock_t *d_a = 0;
checkCudaErrors(hipMalloc((void **)&d_a, 2 * nstreams * sizeof(clock_t)));
// Allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
for (int i = 0 ; i < nstreams ; i++)
{
checkCudaErrors(hipStreamCreate(&(streams[i])));
}
// Create CUDA event handles
hipEvent_t start_event, stop_event;
checkCudaErrors(hipEventCreate(&start_event));
checkCudaErrors(hipEventCreate(&stop_event));
// Target time per kernel is kernel_time ms, clockRate is in KHz
// Target number of clocks = target time * clock frequency
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to prevent hangs reduce time_clocks.
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 100));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
clock_t total_clocks = 0;
// Start the clock
checkCudaErrors(hipEventRecord(start_event, 0));
// Queue pairs of {kernel_A, kernel_B} in separate streams
for (int i = 0 ; i < nstreams ; ++i)
{
hipLaunchKernelGGL(( kernel_A), dim3(1),dim3(1),0,streams[i], &d_a[2*i], time_clocks);
total_clocks += time_clocks;
hipLaunchKernelGGL(( kernel_B), dim3(1),dim3(1),0,streams[i], &d_a[2*i+1], time_clocks);
total_clocks += time_clocks;
}
// Stop the clock in stream 0 (i.e. all previous kernels will be complete)
checkCudaErrors(hipEventRecord(stop_event, 0));
// At this point the CPU has dispatched all work for the GPU and can
// continue processing other tasks in parallel. In this sample we just want
// to wait until all work is done so we use a blocking hipMemcpy below.
// Run the sum kernel and copy the result back to host
hipLaunchKernelGGL(( sum), dim3(1),dim3(32), 0, 0, d_a, 2 * nstreams);
checkCudaErrors(hipMemcpy(a, d_a, sizeof(clock_t), hipMemcpyDeviceToHost));
// stop_event will have been recorded but including the synchronize here to
// prevent copy/paste errors!
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs and %.3fs\n", nstreams, (nstreams + 1) * kernel_time / 1000.0f, 2 * nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, 2 * kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
// pass when each kernel spun for at least its requested tick budget
bool bTestResult = (a[0] >= total_clocks);
// Release resources
for (int i = 0 ; i < nstreams ; i++)
{
hipStreamDestroy(streams[i]);
}
free(streams);
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
hipHostFree(a);
hipFree(d_a);
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 374a41cac4aa00dd3f262f841370bf9d3419b1b1.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates how HyperQ allows supporting devices to avoid false
// dependencies between kernels in different streams.
//
// - Devices without HyperQ will run a maximum of two kernels at a time (one
// kernel_A and one kernel_B).
// - Devices with HyperQ will run up to 32 kernels simultaneously.
#include <stdio.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_functions.h>
#include <helper_cuda.h>
const char *sSDKsample = "hyperQ";
// This subroutine does no real work but runs for at least the specified number
// of clock ticks.
// Spin on the device until at least clock_count clock ticks have elapsed,
// then store the observed tick count in d_o[0]. The unsigned subtraction
// below keeps the elapsed-time computation correct across clock() wrap-around.
__device__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t) (end_clock - start_clock);
}
d_o[0] = clock_offset;
}
// We create two identical kernels calling clock_block(), we create two so that
// we can identify dependencies in the profile timeline ("kernel_B" is always
// dependent on "kernel_A" in the same stream).
// Thin __global__ wrapper around clock_block(); identical to kernel_B so the
// two launches per stream are distinguishable in a profiler timeline.
__global__ void kernel_A(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
// Second identical wrapper around clock_block(); queued after kernel_A in
// each stream so the in-stream dependency is visible when profiling.
__global__ void kernel_B(clock_t *d_o, clock_t clock_count)
{
clock_block(d_o, clock_count);
}
// Single-warp reduction kernel (note: this is not optimized for simplicity)
// Single-block reduction: sums d_clocks[0..N) and writes the total back to
// d_clocks[0]. Expects blockDim.x == 32 (s_clocks has 32 slots and the
// reduction loop starts at warpSize / 2); main() launches it as sum<<<1,32>>>.
__global__ void sum(clock_t *d_clocks, int N)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
// each thread accumulates every blockDim.x-th element into a private sum
for (int i = threadIdx.x ; i < N ; i += blockDim.x)
{
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
cg::sync(cta);
// pairwise halving reduction of the per-thread sums in shared memory
for (int i = warpSize / 2 ; i > 0 ; i /= 2)
{
if (threadIdx.x < i)
{
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
cg::sync(cta);
}
if (threadIdx.x == 0)
{
d_clocks[0] = s_clocks[0];
}
}
// Sample driver: queues a {kernel_A, kernel_B} pair into each of nstreams
// streams, times the whole batch with events, and checks the summed device
// clocks against the expected total. On HyperQ-capable hardware the streams
// run concurrently, so the measured time approaches 2 * kernel_time.
// Exits EXIT_SUCCESS when the clock total meets expectations.
int main(int argc, char **argv)
{
int nstreams = 32; // One stream for each pair of kernels
float kernel_time = 10; // Time each kernel should run in ms
float elapsed_time;
int cuda_device = 0;
printf("starting %s...\n", sSDKsample);
// Get number of streams (if overridden on the command line)
// NOTE(review): the value is not validated; a non-positive nstreams would
// break the allocations below — confirm acceptable for sample code.
if (checkCmdLineFlag(argc, (const char **)argv, "nstreams"))
{
nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "nstreams");
}
// Use command-line specified CUDA device, otherwise use device with
// highest Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
// Get device properties
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDevice(&cuda_device));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
// HyperQ is available in devices of Compute Capability 3.5 and higher
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
printf(" CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf(" CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// Allocate host memory for the output (reduced to a single value)
clock_t *a = 0;
checkCudaErrors(cudaMallocHost((void **)&a, sizeof(clock_t)));
// Allocate device memory for the output (one value for each kernel)
clock_t *d_a = 0;
checkCudaErrors(cudaMalloc((void **)&d_a, 2 * nstreams * sizeof(clock_t)));
// Allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
for (int i = 0 ; i < nstreams ; i++)
{
checkCudaErrors(cudaStreamCreate(&(streams[i])));
}
// Create CUDA event handles
cudaEvent_t start_event, stop_event;
checkCudaErrors(cudaEventCreate(&start_event));
checkCudaErrors(cudaEventCreate(&stop_event));
// Target time per kernel is kernel_time ms, clockRate is in KHz
// Target number of clocks = target time * clock frequency
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to prevent hangs reduce time_clocks.
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 100));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
clock_t total_clocks = 0;
// Start the clock
checkCudaErrors(cudaEventRecord(start_event, 0));
// Queue pairs of {kernel_A, kernel_B} in separate streams
for (int i = 0 ; i < nstreams ; ++i)
{
kernel_A<<<1,1,0,streams[i]>>>(&d_a[2*i], time_clocks);
total_clocks += time_clocks;
kernel_B<<<1,1,0,streams[i]>>>(&d_a[2*i+1], time_clocks);
total_clocks += time_clocks;
}
// Stop the clock in stream 0 (i.e. all previous kernels will be complete)
checkCudaErrors(cudaEventRecord(stop_event, 0));
// At this point the CPU has dispatched all work for the GPU and can
// continue processing other tasks in parallel. In this sample we just want
// to wait until all work is done so we use a blocking cudaMemcpy below.
// Run the sum kernel and copy the result back to host
sum<<<1,32>>>(d_a, 2 * nstreams);
checkCudaErrors(cudaMemcpy(a, d_a, sizeof(clock_t), cudaMemcpyDeviceToHost));
// stop_event will have been recorded but including the synchronize here to
// prevent copy/paste errors!
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs and %.3fs\n", nstreams, (nstreams + 1) * kernel_time / 1000.0f, 2 * nstreams *kernel_time / 1000.0f);
printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, 2 * kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
// pass when each kernel spun for at least its requested tick budget
bool bTestResult = (a[0] >= total_clocks);
// Release resources
for (int i = 0 ; i < nstreams ; i++)
{
cudaStreamDestroy(streams[i]);
}
free(streams);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFreeHost(a);
cudaFree(d_a);
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
432c7766a7519910aeb714f778273dcb02ad25ff.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include "contact.h"
using namespace std;
__device__ void parallel_sort_para(int mi, int nj, float sx, float sy, float sz, float pxmi, float pymi, float pzmi,
float pxnj, float pynj, float pznj, float pdotp, float rp,
float *xmin, float *ymin);
__global__ void contact(float **var, int **intVar){
const int npcn = 2000;
int mi = threadIdx.x + blockIdx.x*blockDim.x;
int nfib = *intVar[6];
int nseg = *intVar[7];
int *potConSize = intVar[33];
int *cluster = intVar[35];
int nPair = potConSize[mi];
if (nPair == 0) return;
int *ncpf = intVar[2];
int *clist = intVar[18];
int maxCon = *intVar[25];
int *potCon = intVar[32];
float *rx = var[3];
float *ry = var[4];
float *rz = var[5];
float *px = var[18];
float *py = var[19];
float *pz = var[20];
float *fcx = var[66];
float *fcy = var[67];
float *fcz = var[68];
float *tcx = var[69];
float *tcy = var[70];
float *tcz = var[71];
float rp = *var[114];
float over_cut = *var[118];
float sidex = *var[119];
float sidey = *var[120];
float sidez = *var[121];
float contact_cutoff = *var[128];
float rep_cutoff = *var[129];
float delta_rx = *var[138];
float fstar = *var[144];
float fact = *var[145];
float Astar = *var[146];
float decatt = *var[147];
float *GijxV = var[149];
float *GijyV = var[150];
float *GijzV = var[151];
float *GjixV = var[152];
float *GjiyV = var[153];
float *GjizV = var[154];
float *nxV = var[155];
float *nyV = var[156];
float *nzV = var[157];
float *gV = var[158];
float rxmi, rymi, rzmi, rxnj, rynj, rznj;
float pxmi, pymi, pzmi, pxnj, pynj, pznj;
float sxx, syy, szz, corx, cory, corz;
float rxmi_shift, rymi_shift, rzmi_shift;
float pdotp, xmin, ymin, dx, dy, dz, sep;
float xi[9], yj[9], gij, nijx, nijy, nijz, forc;
float Gijx, Gijy, Gijz, Gjix, Gjiy, Gjiz, sep_tmp;
int nP, nj, ipos, ith, oldmi, oldnj, m, n;
rxmi = rx[mi]; rymi = ry[mi]; rzmi = rz[mi];
pxmi = px[mi]; pymi = py[mi]; pzmi = pz[mi];
for (nP = 0; nP < nPair; nP++){
nj = potCon[mi * npcn + nP];
//printf("in loop mi nj %4d %4d\n", mi, nj);
rxnj = rx[nj]; rynj = ry[nj]; rznj = rz[nj];
pxnj = px[nj]; pynj = py[nj]; pznj = pz[nj];
// find minimum image (for shear flow system)
sxx = rxnj - rxmi;
syy = rynj - rymi;
szz = rznj - rzmi;
cory = roundf(syy / sidey);
corz = roundf(szz / sidez);
sxx = sxx - corz*delta_rx;
corx = roundf(sxx / sidex);
sxx = sxx - corx*sidex;
syy = syy - cory*sidey;
szz = szz - corz*sidez;
rxmi_shift = rxnj - sxx;
rymi_shift = rynj - syy;
rzmi_shift = rznj - szz;
pdotp = pxmi*pxnj + pymi*pynj + pzmi*pznj;
xmin = (-(pxnj * sxx + pynj * syy + pznj * szz)* pdotp
+ (pxmi * sxx + pymi * syy + pzmi * szz))
/ (1.0 - pdotp*pdotp);
ymin = ((pxmi * sxx + pymi * syy + pzmi * szz)* pdotp
- (pxnj * sxx + pynj * syy + pznj * szz))
/ (1.0 - pdotp*pdotp);
dx = rxnj + ymin*pxnj - rxmi_shift - xmin*pxmi;
dy = rynj + ymin*pynj - rymi_shift - xmin*pymi;
dz = rznj + ymin*pznj - rzmi_shift - xmin*pzmi;
sep = dx*dx + dy*dy + dz*dz;
//if (mi == 799 && nj == 1312){
// printf("rx mi nj %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f \n", rxmi, rymi, rzmi, rxnj, rynj, rznj);
//}
ipos = 8;
yj[0] = rp;
xi[0] = pxmi*sxx + pymi*syy + pzmi*szz + yj[0] * pdotp;
yj[1] = -rp;
xi[1] = pxmi*sxx + pymi*syy + pzmi*szz + yj[1] * pdotp;
xi[2] = rp;
yj[2] = -(pxnj*sxx + pynj*syy + pznj*szz) + xi[2] * pdotp;
xi[3] = -rp;
yj[3] = -(pxnj*sxx + pynj*syy + pznj*szz) + xi[3] * pdotp;
xi[4] = rp; yj[4] = rp;
xi[5] = rp; yj[5] = -rp;
xi[6] = -rp; yj[6] = rp;
xi[7] = -rp; yj[7] = -rp;
xi[8] = xmin; yj[8] = ymin;
//printf("mi nj pdoptp sep xmin ymin %4d %4d %20.10f %20.10f\n", mi, nj, pdotp, pdotp*pdotp);
// Check if segments are parallel
if (fabsf(pdotp*pdotp - 1.0) <= 1.0e-6) {
//printf("parallel sort %4d %4d\n", mi, nj);
parallel_sort_para(mi, nj, sxx, syy, szz, pxmi, pymi, pzmi,
pxnj, pynj, pznj, pdotp, rp, &xmin, &ymin);
sep = (sxx + ymin*pxnj - xmin*pxmi)*(sxx + ymin*pxnj - xmin*pxmi) +
(syy + ymin*pynj - xmin*pymi)*(syy + ymin*pynj - xmin*pymi) +
(szz + ymin*pznj - xmin*pzmi)*(szz + ymin*pznj - xmin*pzmi);
//printf("parallel: mi nj %4d %4d sep %15.8f xmin ymin %15.8f %15.8f\n", mi, nj, sep, xmin, ymin);
}
else if (sep < rep_cutoff && (fabsf(xmin) >= rp || fabsf(ymin) >= rp)){
sep = 1000.0;
// check which end-side or end-end separation
// is the smallest
for (ith = 0; ith < 8; ith++){
sep_tmp = (sxx + yj[ith] * pxnj - xi[ith] * pxmi)*(sxx + yj[ith] * pxnj - xi[ith] * pxmi) +
(syy + yj[ith] * pynj - xi[ith] * pymi)*(syy + yj[ith] * pynj - xi[ith] * pymi) +
(szz + yj[ith] * pznj - xi[ith] * pzmi)*(szz + yj[ith] * pznj - xi[ith] * pzmi);
if (sep_tmp < sep && fabsf(xi[ith]) <= rp && fabsf(yj[ith]) <= rp){
sep = sep_tmp;
ipos = ith;
}
}
xmin = xi[ipos];
ymin = yj[ipos];
}
gij = sqrtf(sep);
//printf("gij %15.8f\n", gij);
//if (mi == 799 && nj == 1312){
// printf("gij %15.10f\n", gij);
//}
nijx = (sxx + ymin*pxnj - xmin*pxmi) / gij;
nijy = (syy + ymin*pynj - xmin*pymi) / gij;
nijz = (szz + ymin*pznj - xmin*pzmi) / gij;
Gijx = xmin*pxmi + gij*nijx / 2.0;
Gijy = xmin*pymi + gij*nijy / 2.0;
Gijz = xmin*pzmi + gij*nijz / 2.0;
Gjix = ymin*pxnj - gij*nijx / 2.0;
Gjiy = ymin*pynj - gij*nijy / 2.0;
Gjiz = ymin*pznj - gij*nijz / 2.0;
if (gij < 2.0){
atomicAdd(intVar[4], 1); // overs
//printf("overs: %4d %4d %15.10f %15.10f %15.10f \n", mi, nj, xmin, ymin, gij);
}
if (gij < over_cut){
gij = over_cut;
//printf("overs: %4d %4d %15.10f %15.10f %15.10f \n", mi, nj, xmin, ymin, gij);
}
forc = fstar*expf(-fact*(gij - 2.0)) - Astar*expf(-decatt*(gij - 2.0)*(gij - 2.0));
if (sep < rep_cutoff){
//printf("rep %4d %4d %15.8f %15.8f %15.8f %15.8f %15.8f %15.8f\n", mi, nj, nijx, nijy, nijz, forc, sep, gij);
atomicAdd(fcx + mi, -nijx*forc);
atomicAdd(fcy + mi, -nijy*forc);
atomicAdd(fcz + mi, -nijz*forc);
atomicAdd(tcx + mi, -forc*xmin*(pymi*nijz - pzmi*nijy));
atomicAdd(tcy + mi, -forc*xmin*(pzmi*nijx - pxmi*nijz));
atomicAdd(tcz + mi, -forc*xmin*(pxmi*nijy - pymi*nijx));
atomicAdd(fcx + nj, nijx*forc);
atomicAdd(fcy + nj, nijy*forc);
atomicAdd(fcz + nj, nijz*forc);
atomicAdd(tcx + nj, forc*ymin*(pynj*nijz - pznj*nijy));
atomicAdd(tcy + nj, forc*ymin*(pznj*nijx - pxnj*nijz));
atomicAdd(tcz + nj, forc*ymin*(pxnj*nijy - pynj*nijx));
}
if (sep < contact_cutoff){
oldmi = atomicAdd(ncpf + mi, 1);
oldnj = atomicAdd(ncpf + nj, 1);
clist[mi*maxCon + oldmi] = nj;
clist[nj*maxCon + oldnj] = mi;
nxV[mi*maxCon + oldmi] = nijx;
nyV[mi*maxCon + oldmi] = nijy;
nzV[mi*maxCon + oldmi] = nijz;
gV[mi*maxCon + oldmi] = gij;
GijxV[mi*maxCon + oldmi] = Gijx;
GijyV[mi*maxCon + oldmi] = Gijy;
GijzV[mi*maxCon + oldmi] = Gijz;
GjixV[mi*maxCon + oldmi] = Gjix;
GjiyV[mi*maxCon + oldmi] = Gjiy;
GjizV[mi*maxCon + oldmi] = Gjiz;
m = mi / nseg;
n = nj / nseg;
cluster[m*nfib + n] = 1;
cluster[n*nfib + m] = 1;
//printf("%6d %6d %15.10f %15.10f %15.10f\n", mi, nj, xmin, ymin, gij);
if (oldmi >= maxCon || oldnj >= maxCon){
printf("link: mi nj %7d %7d exceeding maxCon, allocate more space\n", mi, nj);
}
}
}
}
// Closest-approach search for the degenerate case of (near-)parallel fiber
// segments, where the generic line-line formula in contact() divides by
// (1 - pdotp^2) ~ 0.  s{x,y,z} is the minimum-image center separation,
// p{x,y,z}{mi,nj} are unit orientation vectors, pdotp their dot product, and
// rp the segment half-length.  Outputs are the arc-length contact
// coordinates *xmin (on fiber i) and *ymin (on fiber j), clamped to [-rp, rp].
__device__ void parallel_sort_para(int mi, int nj, float sx, float sy, float sz, float pxmi, float pymi, float pzmi,
	float pxnj, float pynj, float pznj, float pdotp, float rp,
	float *xmin, float *ymin){
	//printf("accessing parallel_sort_para\n");
	float posneg, pn2, dist, sijp, sijm, sjip, sjim;
	//printf("%4d %4d %15.10f p %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f\n", mi, nj, pdotp, pxmi, pymi, pzmi, pxnj, pynj, pznj);
	// The different end point to fiber contact points
	// (projections of each segment's two endpoints onto the other's axis)
	sijp = pxmi*sx + pymi*sy + pzmi*sz + rp*pdotp;
	sijm = pxmi*sx + pymi*sy + pzmi*sz - rp*pdotp;
	sjip = -(pxnj*sx + pynj*sy + pznj*sz) + rp*pdotp;
	sjim = -(pxnj*sx + pynj*sy + pznj*sz) - rp*pdotp;
	//printf("parallel\n");
	//printf("%4d %4d sx sy sz %15.10f %15.10f %15.10f sijp sijm sjip sjim %15.10f %15.10f %15.10f %15.10f\n", mi, nj, sx, sy, sz, sijp, sijm, sjip, sjim);
	// for fiber i: pick the candidate closer to the segment center;
	// posneg records which end of fiber j produced it (+1 / -1 / tie -> 0)
	if (fabsf(sijp) < fabsf(sijm)){
		*xmin = sijp;
		posneg = 1.0;
	}
	else if (fabsf(sijp) > fabsf(sijm)){
		*xmin = sijm;
		posneg = -1.0;
	}
	else{
		// exact tie: symmetric overlap, contact at the segment midpoint
		*xmin = 0.0;
		posneg = 0.0;
	}
	// clamp to the physical segment
	if (*xmin >= rp){
		*xmin = rp;
	}
	if (*xmin <= -rp){
		*xmin = -rp;
	}
	// for fiber j (same selection, mirrored)
	if (fabsf(sjip) < fabsf(sjim)){
		*ymin = sjip;
	}
	else if (fabsf(sjip) > fabsf(sjim)){
		*ymin = sjim;
	}
	else{
		*ymin = 0.0;
		posneg = 0.0;
	}
	if (*ymin >= rp){
		*ymin = rp;
	}
	if (*ymin <= -rp){
		*ymin = -rp;
	}
	//printf("xmin ymin in %12.8f %12.8f\n", *xmin, *ymin);
	//printf("xmin ymin out %4d %4d %16.10f %16.10f\n", mi, nj, *xmin, *ymin);
	// Both contact points interior: the parallel segments overlap over a
	// finite length, so shift both points toward the middle of the overlap.
	// pn2 accounts for anti-parallel orientation of the two axes.
	if (fabsf(*xmin) < rp && fabsf(*ymin) < rp){
		if (pdotp > 0.0){
			pn2 = 1.0;
		}
		else{
			pn2 = -1.0;
		}
		dist = (rp + posneg**xmin) / 2.0;	// posneg**xmin parses as posneg * (*xmin)
		*xmin = *xmin - posneg*dist;
		*ymin = *ymin + posneg*pn2*dist;
		//printf("xmin ymin in %12.8f %12.8f\n", *xmin, *ymin);
	}
}
| 432c7766a7519910aeb714f778273dcb02ad25ff.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include "contact.h"
using namespace std;
__device__ void parallel_sort_para(int mi, int nj, float sx, float sy, float sz, float pxmi, float pymi, float pzmi,
float pxnj, float pynj, float pznj, float pdotp, float rp,
float *xmin, float *ymin);
// One thread per fiber segment: thread mi walks its list of potential contact
// partners (potCon), computes the closest approach between each segment pair
// under minimum-image / sheared periodic boundaries, accumulates repulsive
// and attractive forces/torques via atomics, and records contact bookkeeping
// (clist, normal/gap arrays, fiber-cluster matrix) for later pipeline stages.
// var / intVar are tables of device pointers; the numeric slot indices below
// are the simulation's fixed assignments — TODO confirm against the host-side
// table builder.
__global__ void contact(float **var, int **intVar){
	const int npcn = 2000;  // row stride of potCon (max potential contacts per segment)
	int mi = threadIdx.x + blockIdx.x*blockDim.x;  // segment handled by this thread
	int nfib = *intVar[6];
	int nseg = *intVar[7];   // segments per fiber (mi/nseg -> fiber index)
	int *potConSize = intVar[33];
	int *cluster = intVar[35];
	int nPair = potConSize[mi];
	if (nPair == 0) return;  // no candidates for this segment
	int *ncpf = intVar[2];   // per-segment contact count (atomically bumped)
	int *clist = intVar[18]; // per-segment contact neighbor list
	int maxCon = *intVar[25];// capacity of each clist row
	int *potCon = intVar[32];
	float *rx = var[3];
	float *ry = var[4];
	float *rz = var[5];
	float *px = var[18];
	float *py = var[19];
	float *pz = var[20];
	float *fcx = var[66];
	float *fcy = var[67];
	float *fcz = var[68];
	float *tcx = var[69];
	float *tcy = var[70];
	float *tcz = var[71];
	float rp = *var[114];          // segment half-length
	float over_cut = *var[118];    // floor applied to the gap before the force law
	float sidex = *var[119];
	float sidey = *var[120];
	float sidez = *var[121];
	float contact_cutoff = *var[128];
	float rep_cutoff = *var[129];
	float delta_rx = *var[138];    // x-offset of the sheared periodic image
	float fstar = *var[144];
	float fact = *var[145];
	float Astar = *var[146];
	float decatt = *var[147];
	float *GijxV = var[149];
	float *GijyV = var[150];
	float *GijzV = var[151];
	float *GjixV = var[152];
	float *GjiyV = var[153];
	float *GjizV = var[154];
	float *nxV = var[155];
	float *nyV = var[156];
	float *nzV = var[157];
	float *gV = var[158];
	float rxmi, rymi, rzmi, rxnj, rynj, rznj;
	float pxmi, pymi, pzmi, pxnj, pynj, pznj;
	float sxx, syy, szz, corx, cory, corz;
	float rxmi_shift, rymi_shift, rzmi_shift;
	float pdotp, xmin, ymin, dx, dy, dz, sep;
	float xi[9], yj[9], gij, nijx, nijy, nijz, forc;
	float Gijx, Gijy, Gijz, Gjix, Gjiy, Gjiz, sep_tmp;
	int nP, nj, ipos, ith, oldmi, oldnj, m, n;
	rxmi = rx[mi]; rymi = ry[mi]; rzmi = rz[mi];
	pxmi = px[mi]; pymi = py[mi]; pzmi = pz[mi];
	for (nP = 0; nP < nPair; nP++){
		nj = potCon[mi * npcn + nP];
		//printf("in loop mi nj %4d %4d\n", mi, nj);
		rxnj = rx[nj]; rynj = ry[nj]; rznj = rz[nj];
		pxnj = px[nj]; pynj = py[nj]; pznj = pz[nj];
		// find minimum image (for shear flow system)
		sxx = rxnj - rxmi;
		syy = rynj - rymi;
		szz = rznj - rzmi;
		cory = roundf(syy / sidey);
		corz = roundf(szz / sidez);
		sxx = sxx - corz*delta_rx;  // account for the sheared z-image x offset
		corx = roundf(sxx / sidex);
		sxx = sxx - corx*sidex;
		syy = syy - cory*sidey;
		szz = szz - corz*sidez;
		rxmi_shift = rxnj - sxx;
		rymi_shift = rynj - syy;
		rzmi_shift = rznj - szz;
		pdotp = pxmi*pxnj + pymi*pynj + pzmi*pznj;
		// closest-approach arc-length coordinates on the two infinite lines
		xmin = (-(pxnj * sxx + pynj * syy + pznj * szz)* pdotp
			+ (pxmi * sxx + pymi * syy + pzmi * szz))
			/ (1.0 - pdotp*pdotp);
		ymin = ((pxmi * sxx + pymi * syy + pzmi * szz)* pdotp
			- (pxnj * sxx + pynj * syy + pznj * szz))
			/ (1.0 - pdotp*pdotp);
		dx = rxnj + ymin*pxnj - rxmi_shift - xmin*pxmi;
		dy = rynj + ymin*pynj - rymi_shift - xmin*pymi;
		dz = rznj + ymin*pznj - rzmi_shift - xmin*pzmi;
		sep = dx*dx + dy*dy + dz*dz;  // squared separation at closest approach
		//if (mi == 799 && nj == 1312){
		//	printf("rx mi nj %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f \n", rxmi, rymi, rzmi, rxnj, rynj, rznj);
		//}
		// candidate contact coordinates: end-to-side (0-3), end-to-end (4-7),
		// interior closest approach (8, the default)
		ipos = 8;
		yj[0] = rp;
		xi[0] = pxmi*sxx + pymi*syy + pzmi*szz + yj[0] * pdotp;
		yj[1] = -rp;
		xi[1] = pxmi*sxx + pymi*syy + pzmi*szz + yj[1] * pdotp;
		xi[2] = rp;
		yj[2] = -(pxnj*sxx + pynj*syy + pznj*szz) + xi[2] * pdotp;
		xi[3] = -rp;
		yj[3] = -(pxnj*sxx + pynj*syy + pznj*szz) + xi[3] * pdotp;
		xi[4] = rp; yj[4] = rp;
		xi[5] = rp; yj[5] = -rp;
		xi[6] = -rp; yj[6] = rp;
		xi[7] = -rp; yj[7] = -rp;
		xi[8] = xmin; yj[8] = ymin;
		//printf("mi nj pdoptp sep xmin ymin %4d %4d %20.10f %20.10f\n", mi, nj, pdotp, pdotp*pdotp);
		// Check if segments are parallel
		if (fabsf(pdotp*pdotp - 1.0) <= 1.0e-6) {
			//printf("parallel sort %4d %4d\n", mi, nj);
			parallel_sort_para(mi, nj, sxx, syy, szz, pxmi, pymi, pzmi,
				pxnj, pynj, pznj, pdotp, rp, &xmin, &ymin);
			sep = (sxx + ymin*pxnj - xmin*pxmi)*(sxx + ymin*pxnj - xmin*pxmi) +
				(syy + ymin*pynj - xmin*pymi)*(syy + ymin*pynj - xmin*pymi) +
				(szz + ymin*pznj - xmin*pzmi)*(szz + ymin*pznj - xmin*pzmi);
			//printf("parallel: mi nj %4d %4d sep %15.8f xmin ymin %15.8f %15.8f\n", mi, nj, sep, xmin, ymin);
		}
		else if (sep < rep_cutoff && (fabsf(xmin) >= rp || fabsf(ymin) >= rp)){
			// interior closest approach fell outside a segment: re-scan the
			// end-point candidates that lie on both segments
			sep = 1000.0;
			// check which end-side or end-end separation
			// is the smallest
			for (ith = 0; ith < 8; ith++){
				sep_tmp = (sxx + yj[ith] * pxnj - xi[ith] * pxmi)*(sxx + yj[ith] * pxnj - xi[ith] * pxmi) +
					(syy + yj[ith] * pynj - xi[ith] * pymi)*(syy + yj[ith] * pynj - xi[ith] * pymi) +
					(szz + yj[ith] * pznj - xi[ith] * pzmi)*(szz + yj[ith] * pznj - xi[ith] * pzmi);
				if (sep_tmp < sep && fabsf(xi[ith]) <= rp && fabsf(yj[ith]) <= rp){
					sep = sep_tmp;
					ipos = ith;
				}
			}
			xmin = xi[ipos];
			ymin = yj[ipos];
		}
		gij = sqrtf(sep);  // gap between the segment axes at the contact point
		//printf("gij %15.8f\n", gij);
		//if (mi == 799 && nj == 1312){
		//	printf("gij %15.10f\n", gij);
		//}
		// unit normal from i to j at the contact, and contact-point offsets
		nijx = (sxx + ymin*pxnj - xmin*pxmi) / gij;
		nijy = (syy + ymin*pynj - xmin*pymi) / gij;
		nijz = (szz + ymin*pznj - xmin*pzmi) / gij;
		Gijx = xmin*pxmi + gij*nijx / 2.0;
		Gijy = xmin*pymi + gij*nijy / 2.0;
		Gijz = xmin*pzmi + gij*nijz / 2.0;
		Gjix = ymin*pxnj - gij*nijx / 2.0;
		Gjiy = ymin*pynj - gij*nijy / 2.0;
		Gjiz = ymin*pznj - gij*nijz / 2.0;
		if (gij < 2.0){
			atomicAdd(intVar[4], 1); // overs
			//printf("overs: %4d %4d %15.10f %15.10f %15.10f \n", mi, nj, xmin, ymin, gij);
		}
		if (gij < over_cut){
			gij = over_cut;  // floor the gap so the force law stays bounded
			//printf("overs: %4d %4d %15.10f %15.10f %15.10f \n", mi, nj, xmin, ymin, gij);
		}
		// exponential repulsion minus Gaussian attraction, both in the gap
		forc = fstar*expf(-fact*(gij - 2.0)) - Astar*expf(-decatt*(gij - 2.0)*(gij - 2.0));
		if (sep < rep_cutoff){
			//printf("rep %4d %4d %15.8f %15.8f %15.8f %15.8f %15.8f %15.8f\n", mi, nj, nijx, nijy, nijz, forc, sep, gij);
			// equal and opposite forces; torques are lever-arm x force
			atomicAdd(fcx + mi, -nijx*forc);
			atomicAdd(fcy + mi, -nijy*forc);
			atomicAdd(fcz + mi, -nijz*forc);
			atomicAdd(tcx + mi, -forc*xmin*(pymi*nijz - pzmi*nijy));
			atomicAdd(tcy + mi, -forc*xmin*(pzmi*nijx - pxmi*nijz));
			atomicAdd(tcz + mi, -forc*xmin*(pxmi*nijy - pymi*nijx));
			atomicAdd(fcx + nj, nijx*forc);
			atomicAdd(fcy + nj, nijy*forc);
			atomicAdd(fcz + nj, nijz*forc);
			atomicAdd(tcx + nj, forc*ymin*(pynj*nijz - pznj*nijy));
			atomicAdd(tcy + nj, forc*ymin*(pznj*nijx - pxnj*nijz));
			atomicAdd(tcz + nj, forc*ymin*(pxnj*nijy - pynj*nijx));
		}
		if (sep < contact_cutoff){
			// reserve one slot in each segment's contact list
			oldmi = atomicAdd(ncpf + mi, 1);
			oldnj = atomicAdd(ncpf + nj, 1);
			clist[mi*maxCon + oldmi] = nj;
			clist[nj*maxCon + oldnj] = mi;
			nxV[mi*maxCon + oldmi] = nijx;
			nyV[mi*maxCon + oldmi] = nijy;
			nzV[mi*maxCon + oldmi] = nijz;
			gV[mi*maxCon + oldmi] = gij;
			GijxV[mi*maxCon + oldmi] = Gijx;
			GijyV[mi*maxCon + oldmi] = Gijy;
			GijzV[mi*maxCon + oldmi] = Gijz;
			GjixV[mi*maxCon + oldmi] = Gjix;
			GjiyV[mi*maxCon + oldmi] = Gjiy;
			GjizV[mi*maxCon + oldmi] = Gjiz;
			m = mi / nseg;
			n = nj / nseg;
			cluster[m*nfib + n] = 1;
			cluster[n*nfib + m] = 1;
			//printf("%6d %6d %15.10f %15.10f %15.10f\n", mi, nj, xmin, ymin, gij);
			// NOTE(review): the stores above already happened, so an overflow
			// has corrupted neighboring rows by the time this fires
			if (oldmi >= maxCon || oldnj >= maxCon){
				printf("link: mi nj %7d %7d exceeding maxCon, allocate more space\n", mi, nj);
			}
		}
	}
}
// Closest-approach search for the degenerate case of (near-)parallel fiber
// segments, where the generic line-line formula in contact() divides by
// (1 - pdotp^2) ~ 0.  s{x,y,z} is the minimum-image center separation,
// p{x,y,z}{mi,nj} are unit orientation vectors, pdotp their dot product, and
// rp the segment half-length.  Outputs are the arc-length contact
// coordinates *xmin (on fiber i) and *ymin (on fiber j), clamped to [-rp, rp].
__device__ void parallel_sort_para(int mi, int nj, float sx, float sy, float sz, float pxmi, float pymi, float pzmi,
	float pxnj, float pynj, float pznj, float pdotp, float rp,
	float *xmin, float *ymin){
	//printf("accessing parallel_sort_para\n");
	float posneg, pn2, dist, sijp, sijm, sjip, sjim;
	//printf("%4d %4d %15.10f p %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f\n", mi, nj, pdotp, pxmi, pymi, pzmi, pxnj, pynj, pznj);
	// The different end point to fiber contact points
	// (projections of each segment's two endpoints onto the other's axis)
	sijp = pxmi*sx + pymi*sy + pzmi*sz + rp*pdotp;
	sijm = pxmi*sx + pymi*sy + pzmi*sz - rp*pdotp;
	sjip = -(pxnj*sx + pynj*sy + pznj*sz) + rp*pdotp;
	sjim = -(pxnj*sx + pynj*sy + pznj*sz) - rp*pdotp;
	//printf("parallel\n");
	//printf("%4d %4d sx sy sz %15.10f %15.10f %15.10f sijp sijm sjip sjim %15.10f %15.10f %15.10f %15.10f\n", mi, nj, sx, sy, sz, sijp, sijm, sjip, sjim);
	// for fiber i: pick the candidate closer to the segment center;
	// posneg records which end of fiber j produced it (+1 / -1 / tie -> 0)
	if (fabsf(sijp) < fabsf(sijm)){
		*xmin = sijp;
		posneg = 1.0;
	}
	else if (fabsf(sijp) > fabsf(sijm)){
		*xmin = sijm;
		posneg = -1.0;
	}
	else{
		// exact tie: symmetric overlap, contact at the segment midpoint
		*xmin = 0.0;
		posneg = 0.0;
	}
	// clamp to the physical segment
	if (*xmin >= rp){
		*xmin = rp;
	}
	if (*xmin <= -rp){
		*xmin = -rp;
	}
	// for fiber j (same selection, mirrored)
	if (fabsf(sjip) < fabsf(sjim)){
		*ymin = sjip;
	}
	else if (fabsf(sjip) > fabsf(sjim)){
		*ymin = sjim;
	}
	else{
		*ymin = 0.0;
		posneg = 0.0;
	}
	if (*ymin >= rp){
		*ymin = rp;
	}
	if (*ymin <= -rp){
		*ymin = -rp;
	}
	//printf("xmin ymin in %12.8f %12.8f\n", *xmin, *ymin);
	//printf("xmin ymin out %4d %4d %16.10f %16.10f\n", mi, nj, *xmin, *ymin);
	// Both contact points interior: the parallel segments overlap over a
	// finite length, so shift both points toward the middle of the overlap.
	// pn2 accounts for anti-parallel orientation of the two axes.
	if (fabsf(*xmin) < rp && fabsf(*ymin) < rp){
		if (pdotp > 0.0){
			pn2 = 1.0;
		}
		else{
			pn2 = -1.0;
		}
		dist = (rp + posneg**xmin) / 2.0;	// posneg**xmin parses as posneg * (*xmin)
		*xmin = *xmin - posneg*dist;
		*ymin = *ymin + posneg*pn2*dist;
		//printf("xmin ymin in %12.8f %12.8f\n", *xmin, *ymin);
	}
}
|
1e547a5ebb1cba9310d3d2889be9a0d7b5239297.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
//#define DEVICE_ALLOC
#define UVM_ALLOC
//#define HOST_ALLOC
//#define SIZE (1024 * 8)
//#define STEP 16
//#define SIZE (1024 * 1024)
//#define STEP (1024 * 32)
//#define STEP 512
#define SIZE (1024 * 1024 * 1024)
//#define STEP (1024 * 1024 * 32)
#define STEP (512)
//#define SIZE (1024 * 1024 * 1024L * 2)
//#define STEP (1024 * 1024 * 32)
//#define PRINT_LAT
#define LAT_ARRAY_SIZE 12
#define LAT_LOWER_BOUND 10000
#define LAT_HIGHER_BOUND 20000
// Single-thread memory-latency probe: strides through `input`, reading one
// int every STEP elements and timing each read with clock().  Per-access
// latencies are classified as "short" (< LAT_LOWER_BOUND cycles), "large"
// (> LAT_HIGHER_BOUND cycles), or in-between (accumulated into total_lat[3]).
// Summary statistics are published in total_lat[0..11]:
//   [0..2]  total / max / min over all accesses
//   [4..6]  total / max / min over large-latency accesses, [10] their count
//   [7..9]  total / max / min over short-latency accesses, [11] their count
// Intended to be launched with a single thread (<<<1, 1>>>, see main).
__global__ void kernel(int *input, double *total_lat)
{
	unsigned t0, t1, lat;
	__shared__ int s_tmp;	// sink for the loads so they are not optimized away
	double maxlat, minlat, totallat;
	double maxlat_l, minlat_l, totallat_l;
	double maxlat_s, minlat_s, totallat_s;
	unsigned llat_num, slat_num;
	s_tmp = 0;
	totallat = maxlat = minlat = 0.0;
	totallat_l = maxlat_l = minlat_l = 0.0;
	totallat_s = maxlat_s = minlat_s = 0.0;
	llat_num = slat_num = 0;
	for (unsigned long long i = 0; i < SIZE; i += STEP) {
		//if (i == 1024 * 350) {
		//	i += 1024 * 482;
		//}
		t0 = clock();
		__syncthreads();	// fence the timed load between the clock() reads
		s_tmp += input[i];
		__syncthreads();
		t1 = clock();
		lat = t1 - t0;
#ifdef PRINT_LAT
		printf("0x%10llx: %d\n", i, lat);
#endif
		totallat += lat;
		if (lat > maxlat)
			maxlat = lat;
		if (lat < minlat || minlat == 0)
			minlat = lat;
		// classify lat
		if (lat >= LAT_LOWER_BOUND && lat <= LAT_HIGHER_BOUND)
			total_lat[3] += lat;	// mid band: accumulate directly into the output
		else if (lat < LAT_LOWER_BOUND) {
			totallat_s += lat;
			if (lat > maxlat_s)
				maxlat_s = lat;
			if (lat < minlat_s || minlat_s == 0)
				minlat_s = lat;
			slat_num++;
		} else {
			totallat_l += lat;
			if (lat > maxlat_l)
				maxlat_l = lat;
			if (lat < minlat_l || minlat_l == 0)
				minlat_l = lat;
			llat_num++;
		}
		//if (i >= 1024 * (849 - 1))
		//	return;
	}
	// publish the summary statistics (layout documented above)
	total_lat[0] = totallat;
	total_lat[1] = maxlat;
	total_lat[2] = minlat;
	total_lat[4] = totallat_l;
	total_lat[5] = maxlat_l;
	total_lat[6] = minlat_l;
	total_lat[7] = totallat_s;
	total_lat[8] = maxlat_s;
	total_lat[9] = minlat_s;
	total_lat[10] = llat_num;
	total_lat[11] = slat_num;
}
// Host driver for the latency probe: allocates the buffer with the strategy
// chosen at compile time (DEVICE_ALLOC / UVM_ALLOC / HOST_ALLOC), touches one
// element per STEP, runs the single-thread kernel, and prints the statistics.
// Fixes vs. the original:
//  - SIZE/STEP (an int expression) was passed to a %lld conversion, which is
//    undefined behavior; it is now cast to long long.
//  - h_total_lat and the DEVICE_ALLOC staging buffer were leaked.
//  - pinned memory from hipHostMalloc was released with hipFree; it must be
//    released with hipHostFree.
int main()
{
	int *d_input;
	double *total_lat, *h_total_lat;
	h_total_lat = (double*)malloc(LAT_ARRAY_SIZE * sizeof(double));
	if (h_total_lat == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	hipMalloc(&total_lat, LAT_ARRAY_SIZE*sizeof(double));
	for (int i = 0; i < LAT_ARRAY_SIZE; i++)
		h_total_lat[i] = 0.0;
	hipMemcpy(total_lat, h_total_lat, LAT_ARRAY_SIZE*sizeof(double), hipMemcpyHostToDevice);
	// Pick the allocation flavor under test at compile time.
#if defined(DEVICE_ALLOC)
	hipMalloc(&d_input, SIZE*sizeof(int));
#elif defined(UVM_ALLOC)
	hipMallocManaged(&d_input, SIZE*sizeof(int));
#elif defined(HOST_ALLOC)
	hipHostMalloc(&d_input, SIZE*sizeof(int));
#else
	free(h_total_lat);
	return 0;
#endif
	// init: touch one element per STEP so every probed location is populated
#if defined(DEVICE_ALLOC)
	int *h_input;
	h_input = (int*)malloc(SIZE*sizeof(int));
	for (unsigned long long i = 0; i < SIZE; i += STEP) {
		h_input[i] = rand();
	}
	hipMemcpy(d_input, h_input, SIZE*sizeof(int), hipMemcpyHostToDevice);
	free(h_input);	// was leaked in the original
#elif defined(UVM_ALLOC) || defined(HOST_ALLOC)
	for (unsigned long long i = 0; i < SIZE; i += STEP) {
		d_input[i] = rand();
	}
#endif
	hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, d_input, total_lat);
	hipMemcpy(h_total_lat, total_lat, LAT_ARRAY_SIZE*sizeof(double), hipMemcpyDeviceToHost);
#if defined(HOST_ALLOC)
	hipHostFree(d_input);	// pinned host memory needs hipHostFree, not hipFree
#else
	hipFree(d_input);
#endif
	hipFree(total_lat);
	// %lld requires a long long argument; cast the int expression explicitly
	long long nProbes = (long long)(SIZE / STEP);
	double AvgLat = h_total_lat[0] / (SIZE / STEP);
	printf("Average latency: %f (%f / %lld)\n", AvgLat, h_total_lat[0], nProbes);
	printf("Max latency: %f\n", h_total_lat[1]);
	printf("Min latency: %f\n", h_total_lat[2]);
	printf("\n");
	printf("Average latency (large): %f (%f / %f)\n", h_total_lat[4] / h_total_lat[10], h_total_lat[4], h_total_lat[10]);
	printf("Max latency (large): %f\n", h_total_lat[5]);
	printf("Min latency (large): %f\n", h_total_lat[6]);
	printf("\n");
	printf("Average latency (short): %f (%f / %f)\n", h_total_lat[7] / h_total_lat[11], h_total_lat[7], h_total_lat[11]);
	printf("Max latency (short): %f\n", h_total_lat[8]);
	printf("Min latency (short): %f\n", h_total_lat[9]);
	printf("\n");
	printf("Abnormal total: %f\n", h_total_lat[3]);
	free(h_total_lat);	// was leaked in the original
	return 0;
}
| 1e547a5ebb1cba9310d3d2889be9a0d7b5239297.cu | #include <stdio.h>
#include <stdlib.h>
//#define DEVICE_ALLOC
#define UVM_ALLOC
//#define HOST_ALLOC
//#define SIZE (1024 * 8)
//#define STEP 16
//#define SIZE (1024 * 1024)
//#define STEP (1024 * 32)
//#define STEP 512
#define SIZE (1024 * 1024 * 1024)
//#define STEP (1024 * 1024 * 32)
#define STEP (512)
//#define SIZE (1024 * 1024 * 1024L * 2)
//#define STEP (1024 * 1024 * 32)
//#define PRINT_LAT
#define LAT_ARRAY_SIZE 12
#define LAT_LOWER_BOUND 10000
#define LAT_HIGHER_BOUND 20000
// Single-thread memory-latency probe: strides through `input`, reading one
// int every STEP elements and timing each read with clock().  Per-access
// latencies are classified as "short" (< LAT_LOWER_BOUND cycles), "large"
// (> LAT_HIGHER_BOUND cycles), or in-between (accumulated into total_lat[3]).
// Summary statistics are published in total_lat[0..11]:
//   [0..2]  total / max / min over all accesses
//   [4..6]  total / max / min over large-latency accesses, [10] their count
//   [7..9]  total / max / min over short-latency accesses, [11] their count
// Intended to be launched with a single thread (<<<1, 1>>>, see main).
__global__ void kernel(int *input, double *total_lat)
{
	unsigned t0, t1, lat;
	__shared__ int s_tmp;	// sink for the loads so they are not optimized away
	double maxlat, minlat, totallat;
	double maxlat_l, minlat_l, totallat_l;
	double maxlat_s, minlat_s, totallat_s;
	unsigned llat_num, slat_num;
	s_tmp = 0;
	totallat = maxlat = minlat = 0.0;
	totallat_l = maxlat_l = minlat_l = 0.0;
	totallat_s = maxlat_s = minlat_s = 0.0;
	llat_num = slat_num = 0;
	for (unsigned long long i = 0; i < SIZE; i += STEP) {
		//if (i == 1024 * 350) {
		//	i += 1024 * 482;
		//}
		t0 = clock();
		__syncthreads();	// fence the timed load between the clock() reads
		s_tmp += input[i];
		__syncthreads();
		t1 = clock();
		lat = t1 - t0;
#ifdef PRINT_LAT
		printf("0x%10llx: %d\n", i, lat);
#endif
		totallat += lat;
		if (lat > maxlat)
			maxlat = lat;
		if (lat < minlat || minlat == 0)
			minlat = lat;
		// classify lat
		if (lat >= LAT_LOWER_BOUND && lat <= LAT_HIGHER_BOUND)
			total_lat[3] += lat;	// mid band: accumulate directly into the output
		else if (lat < LAT_LOWER_BOUND) {
			totallat_s += lat;
			if (lat > maxlat_s)
				maxlat_s = lat;
			if (lat < minlat_s || minlat_s == 0)
				minlat_s = lat;
			slat_num++;
		} else {
			totallat_l += lat;
			if (lat > maxlat_l)
				maxlat_l = lat;
			if (lat < minlat_l || minlat_l == 0)
				minlat_l = lat;
			llat_num++;
		}
		//if (i >= 1024 * (849 - 1))
		//	return;
	}
	// publish the summary statistics (layout documented above)
	total_lat[0] = totallat;
	total_lat[1] = maxlat;
	total_lat[2] = minlat;
	total_lat[4] = totallat_l;
	total_lat[5] = maxlat_l;
	total_lat[6] = minlat_l;
	total_lat[7] = totallat_s;
	total_lat[8] = maxlat_s;
	total_lat[9] = minlat_s;
	total_lat[10] = llat_num;
	total_lat[11] = slat_num;
}
// Host driver for the latency probe: allocates the buffer with the strategy
// chosen at compile time (DEVICE_ALLOC / UVM_ALLOC / HOST_ALLOC), touches one
// element per STEP, runs the single-thread kernel, and prints the statistics.
// Fixes vs. the original:
//  - SIZE/STEP (an int expression) was passed to a %lld conversion, which is
//    undefined behavior; it is now cast to long long.
//  - h_total_lat and the DEVICE_ALLOC staging buffer were leaked.
//  - pinned memory from cudaMallocHost was released with cudaFree; it must be
//    released with cudaFreeHost.
int main()
{
	int *d_input;
	double *total_lat, *h_total_lat;
	h_total_lat = (double*)malloc(LAT_ARRAY_SIZE * sizeof(double));
	if (h_total_lat == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	cudaMalloc(&total_lat, LAT_ARRAY_SIZE*sizeof(double));
	for (int i = 0; i < LAT_ARRAY_SIZE; i++)
		h_total_lat[i] = 0.0;
	cudaMemcpy(total_lat, h_total_lat, LAT_ARRAY_SIZE*sizeof(double), cudaMemcpyHostToDevice);
	// Pick the allocation flavor under test at compile time.
#if defined(DEVICE_ALLOC)
	cudaMalloc(&d_input, SIZE*sizeof(int));
#elif defined(UVM_ALLOC)
	cudaMallocManaged(&d_input, SIZE*sizeof(int));
#elif defined(HOST_ALLOC)
	cudaMallocHost(&d_input, SIZE*sizeof(int));
#else
	free(h_total_lat);
	return 0;
#endif
	// init: touch one element per STEP so every probed location is populated
#if defined(DEVICE_ALLOC)
	int *h_input;
	h_input = (int*)malloc(SIZE*sizeof(int));
	for (unsigned long long i = 0; i < SIZE; i += STEP) {
		h_input[i] = rand();
	}
	cudaMemcpy(d_input, h_input, SIZE*sizeof(int), cudaMemcpyHostToDevice);
	free(h_input);	// was leaked in the original
#elif defined(UVM_ALLOC) || defined(HOST_ALLOC)
	for (unsigned long long i = 0; i < SIZE; i += STEP) {
		d_input[i] = rand();
	}
#endif
	kernel<<<1, 1>>>(d_input, total_lat);
	cudaMemcpy(h_total_lat, total_lat, LAT_ARRAY_SIZE*sizeof(double), cudaMemcpyDeviceToHost);
#if defined(HOST_ALLOC)
	cudaFreeHost(d_input);	// pinned host memory needs cudaFreeHost, not cudaFree
#else
	cudaFree(d_input);
#endif
	cudaFree(total_lat);
	// %lld requires a long long argument; cast the int expression explicitly
	long long nProbes = (long long)(SIZE / STEP);
	double AvgLat = h_total_lat[0] / (SIZE / STEP);
	printf("Average latency: %f (%f / %lld)\n", AvgLat, h_total_lat[0], nProbes);
	printf("Max latency: %f\n", h_total_lat[1]);
	printf("Min latency: %f\n", h_total_lat[2]);
	printf("\n");
	printf("Average latency (large): %f (%f / %f)\n", h_total_lat[4] / h_total_lat[10], h_total_lat[4], h_total_lat[10]);
	printf("Max latency (large): %f\n", h_total_lat[5]);
	printf("Min latency (large): %f\n", h_total_lat[6]);
	printf("\n");
	printf("Average latency (short): %f (%f / %f)\n", h_total_lat[7] / h_total_lat[11], h_total_lat[7], h_total_lat[11]);
	printf("Max latency (short): %f\n", h_total_lat[8]);
	printf("Min latency (short): %f\n", h_total_lat[9]);
	printf("\n");
	printf("Abnormal total: %f\n", h_total_lat[3]);
	free(h_total_lat);	// was leaked in the original
	return 0;
}
|
411ceee6f1e0fe02692d2fd5dc215f52b7e58ae3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
// Compare the host reference against the GPU result element-wise within a
// small absolute tolerance and report the first mismatch (or success).
// Fix vs. the original: it called abs() on a double difference without
// including <cmath>/<cstdlib>; depending on what the toolchain pulls in, that
// can bind the integer abs overload, truncating |a-b| toward zero and masking
// mismatches smaller than 1.0.  The absolute value is now computed explicitly.
void checkResult(float *hostRef, float *gpuRef, const int N) {
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; i++) {
        double diff = hostRef[i] - gpuRef[i];
        if (diff < 0) diff = -diff;   // |hostRef[i] - gpuRef[i]|
        if (diff > epsilon) {
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
            break;
        }
    }
    if (match) printf("Arrays match.\n\n");
}
// Fill ip[0..size-1] with pseudo-random floats in [0.0, 25.5].
// NOTE(review): srand() is re-seeded with time() on every call, so two calls
// within the same second (as in main, for h_A and h_B) produce identical
// arrays — confirm this is acceptable before relying on h_A differing from h_B.
void initialData(float *ip,int size) {
    // generate different seed for random number
    time_t t;
    srand((unsigned) time(&t));
    for (int i=0; i<size; i++) {
        // keep the low byte of rand() and scale down to [0, 25.5]
        ip[i] = (float)( rand() & 0xFF )/10.0f;
    }
}
// CPU reference implementation of element-wise vector addition:
// C[i] = A[i] + B[i] for i in [0, N).
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
    int i = 0;
    while (i < N) {
        C[i] = A[i] + B[i];
        ++i;
    }
}
// Element-wise vector add on the GPU: one thread per element.
// NOTE(review): indexes by threadIdx.x only and has no bounds check, so it is
// only correct for a single-block launch with blockDim.x equal to the array
// length — which is how main launches it (grid = 1, block = nElem).
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}
// Host driver for the single-block vector-add demo: allocates host/device
// buffers, copies inputs, launches sumArraysOnGPU with one block of nElem
// threads, and verifies the GPU result against the CPU reference.
// Improvement: the file defines a CHECK() error macro that was never used;
// all runtime API calls are now checked and launch errors surfaced.
int main(int argc, char **argv) {
    printf("%s Starting...\n", argv[0]);

#ifndef CHECK   // fallback so this function remains compilable in isolation
#define CHECK(call) (call)
#endif

    // set up device
    int dev = 0;
    CHECK(hipSetDevice(dev));

    // set up data size of vectors
    int nElem = 32;
    printf("Vector size %d\n", nElem);

    // malloc host memory
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A     = (float *)malloc(nBytes);
    h_B     = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef  = (float *)malloc(nBytes);

    // initialize data at host side
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef,  0, nBytes);

    // malloc device global memory
    float *d_A, *d_B, *d_C;
    CHECK(hipMalloc((float**)&d_A, nBytes));
    CHECK(hipMalloc((float**)&d_B, nBytes));
    CHECK(hipMalloc((float**)&d_C, nBytes));

    // transfer data from host to device
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));

    // invoke kernel at host side: one block, one thread per element
    dim3 block (nElem);
    dim3 grid  (nElem/block.x);
    hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C);
    CHECK(hipGetLastError());   // surface bad launch configurations
    printf("Execution configuration <<<%d, %d>>>\n",grid.x,block.x);

    // copy kernel result back to host side (blocking copy also synchronizes)
    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));

    // add vector at host side for result checks
    sumArraysOnHost(h_A, h_B, hostRef, nElem);

    // check device results
    checkResult(hostRef, gpuRef, nElem);

    // free device global memory
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_B));
    CHECK(hipFree(d_C));

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    return(0);
}
| 411ceee6f1e0fe02692d2fd5dc215f52b7e58ae3.cu | #include <cuda_runtime.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Compare the host reference against the GPU result element-wise within a
// small absolute tolerance and report the first mismatch (or success).
// Fix vs. the original: it called abs() on a double difference without
// including <cmath>/<cstdlib>; depending on what the toolchain pulls in, that
// can bind the integer abs overload, truncating |a-b| toward zero and masking
// mismatches smaller than 1.0.  The absolute value is now computed explicitly.
void checkResult(float *hostRef, float *gpuRef, const int N) {
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; i++) {
        double diff = hostRef[i] - gpuRef[i];
        if (diff < 0) diff = -diff;   // |hostRef[i] - gpuRef[i]|
        if (diff > epsilon) {
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
            break;
        }
    }
    if (match) printf("Arrays match.\n\n");
}
// Fill ip[0..size-1] with pseudo-random floats in [0.0, 25.5].
// NOTE(review): srand() is re-seeded with time() on every call, so two calls
// within the same second (as in main, for h_A and h_B) produce identical
// arrays — confirm this is acceptable before relying on h_A differing from h_B.
void initialData(float *ip,int size) {
    // generate different seed for random number
    time_t t;
    srand((unsigned) time(&t));
    for (int i=0; i<size; i++) {
        // keep the low byte of rand() and scale down to [0, 25.5]
        ip[i] = (float)( rand() & 0xFF )/10.0f;
    }
}
// CPU reference implementation of element-wise vector addition:
// C[i] = A[i] + B[i] for i in [0, N).
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
    int i = 0;
    while (i < N) {
        C[i] = A[i] + B[i];
        ++i;
    }
}
// Element-wise vector add on the GPU: one thread per element.
// NOTE(review): indexes by threadIdx.x only and has no bounds check, so it is
// only correct for a single-block launch with blockDim.x equal to the array
// length — which is how main launches it (grid = 1, block = nElem).
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}
// Host driver for the single-block vector-add demo: allocates host/device
// buffers, copies inputs, launches sumArraysOnGPU with one block of nElem
// threads, and verifies the GPU result against the CPU reference.
// Improvement: the file defines a CHECK() error macro that was never used;
// all runtime API calls are now checked and launch errors surfaced.
int main(int argc, char **argv) {
    printf("%s Starting...\n", argv[0]);

#ifndef CHECK   // fallback so this function remains compilable in isolation
#define CHECK(call) (call)
#endif

    // set up device
    int dev = 0;
    CHECK(cudaSetDevice(dev));

    // set up data size of vectors
    int nElem = 32;
    printf("Vector size %d\n", nElem);

    // malloc host memory
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A     = (float *)malloc(nBytes);
    h_B     = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef  = (float *)malloc(nBytes);

    // initialize data at host side
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef,  0, nBytes);

    // malloc device global memory
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_B, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));

    // transfer data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));

    // invoke kernel at host side: one block, one thread per element
    dim3 block (nElem);
    dim3 grid  (nElem/block.x);
    sumArraysOnGPU<<< grid, block >>>(d_A, d_B, d_C);
    CHECK(cudaGetLastError());   // surface bad launch configurations
    printf("Execution configuration <<<%d, %d>>>\n",grid.x,block.x);

    // copy kernel result back to host side (blocking copy also synchronizes)
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    // add vector at host side for result checks
    sumArraysOnHost(h_A, h_B, hostRef, nElem);

    // check device results
    checkResult(hostRef, gpuRef, nElem);

    // free device global memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    return(0);
}
|
4394867cbdca53ea4bb23a3274123d0d9ba14088.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// ---- SoftmaxLayer GPU helper kernels -------------------------------------
// Blob layout throughout: (num, channels, spatial_dim) flattened as
// index = (n * channels + c) * spatial_dim + s.  The channel-wise kernels
// launch one thread per (n, s) pair; the elementwise kernels one per element.

// Per-(n,s) maximum over the channel axis, written to out[n*spatial_dim + s].
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype maxval = -FLT_MAX;
    for (int c = 0; c < channels; ++c) {
      maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
    }
    out[index] = maxval;
  }
}

// Subtracts the per-(n,s) channel_max value from every channel, in place.
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
    const int num, const int channels,
    const int spatial_dim, const Dtype* channel_max, Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    int n = index / channels / spatial_dim;
    int s = index % spatial_dim;
    data[index] -= channel_max[n * spatial_dim + s];
  }
}

// Elementwise exponential: out[i] = exp(data[i]).
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, count) {
    out[index] = exp(data[index]);
  }
}

// Per-(n,s) sum over the channel axis.
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype sum = 0;
    for (int c = 0; c < channels; ++c) {
      sum += data[(n * channels + c) * spatial_dim + s];
    }
    channel_sum[index] = sum;
  }
}

// Divides every channel by the per-(n,s) channel_sum value, in place.
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
    const int num, const int channels,
    const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    int n = index / channels / spatial_dim;
    int s = index % spatial_dim;
    data[index] /= channel_sum[n * spatial_dim + s];
  }
}

// Per-(n,s) dot product of data_1 and data_2 along the channel axis.
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
    const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
    Dtype* channel_dot) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype dot = 0;
    for (int c = 0; c < channels; ++c) {
      dot += (data_1[(n * channels + c) * spatial_dim + s]
          * data_2[(n * channels + c) * spatial_dim + s]);
    }
    channel_dot[index] = dot;
  }
}
// Numerically stable softmax over softmax_axis_: shift every channel by the
// per-(n,s) maximum, exponentiate, then normalize by the per-(n,s) sum.
// scale_ is reused as (num x spatial_dim) scratch, first for the max and
// then for the sum.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  int count = bottom[0]->count();
  int channels = top[0]->shape(softmax_axis_);
  caffe_copy(count, bottom_data, top_data);  // work in place on the output blob
  // We need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  // compute max
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
      scale_data);
  // subtract
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
      scale_data, top_data);
  // exponentiate
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, top_data, top_data);
  // sum after exp
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
      scale_data);
  // divide
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
      scale_data, top_data);
}
// Softmax backward pass:
//   bottom_diff = (top_diff - dot(top_diff, top_data)) .* top_data,
// where the dot product is taken per (n, s) along the channel axis.
// scale_ again serves as (num x spatial_dim) scratch for the dot products.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  Dtype* scale_data = scale_.mutable_gpu_data();
  int count = top[0]->count();
  int channels = top[0]->shape(softmax_axis_);
  caffe_copy(count, top_diff, bottom_diff);
  // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_,
      top_diff, top_data, scale_data);
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
      scale_data, bottom_diff);
  // elementwise multiplication
  caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| 4394867cbdca53ea4bb23a3274123d0d9ba14088.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* out) {
  // One thread per (outer, spatial) position: reduce the channel axis of
  // 'data' to its maximum and store the result in out[index].
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    Dtype best = -FLT_MAX;
    for (int c = 0; c < channels; ++c) {
      best = max(best, data[(n * channels + c) * spatial_dim + s]);
    }
    out[index] = best;
  }
}
// Subtracts the per-(outer, spatial) value in 'channel_max' from every element
// of 'data'; one thread per element. Also reused in Backward_gpu, where
// 'channel_max' holds per-position channel dot products instead of maxima.
// 'num' is unused in the body; kept for a uniform kernel signature.
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
// Recover the outer index n and the spatial offset s from the flat index.
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
  // Elementwise exponential; callers may pass the same pointer for 'data'
  // and 'out' (Forward_gpu uses it in place).
  CUDA_KERNEL_LOOP(i, count) {
    const Dtype v = data[i];
    out[i] = exp(v);
  }
}
// Sums 'data' along the channel axis for each (outer, spatial) position and
// writes the per-position total into 'channel_sum'; one thread per position.
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
// Divides every element of 'data' by its (outer, spatial) position's value in
// 'channel_sum' (the softmax normalization step); one thread per element.
// 'num' is unused in the body; kept for a uniform kernel signature.
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
// Computes, for each (outer, spatial) position, the dot product of data_1 and
// data_2 along the channel axis and stores it in 'channel_dot'; one thread
// per position. Used by Backward_gpu to form inner1d(top_diff, top_data).
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
// GPU forward pass: numerically stable softmax along softmax_axis_.
// Pipeline: copy bottom into top, subtract the per-(outer, spatial) channel
// max, exponentiate in place, then divide by the per-position channel sum.
// scale_ is a scratch blob holding the per-position intermediate (first the
// max, later the sum); kernels run in order on the default stream.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
// GPU backward pass: per (outer, spatial) position computes
//   bottom_diff = (top_diff - dot(top_diff, top_data)) .* top_data
// where the dot product runs along the softmax axis. scale_ holds the
// per-position dot products between the two kernel launches.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
f13a922493ec5712c84ce00fd4a79d50ec2e2d1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// function to add the elements of two arrays
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// printf("\rthreadIdx.x %d blockIdx.x %d ", threadIdx.x, blockIdx.x);
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20; // 1M elements
// Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// float *x = new float[N];
// float *y = new float[N];
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the CPU
//add(N, x, y);
// Run kernel on 1M elements on the GPU
//add<<<1, 1>>>(N, x, y);
//add<<<1, 256>>>(N, x, y);
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
// Free memory
// delete [] x;
// delete [] y;
//hipProfilerStop();
return 0;
}
| f13a922493ec5712c84ce00fd4a79d50ec2e2d1a.cu | #include <iostream>
#include <math.h>
// function to add the elements of two arrays
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float *x, float *y)
{
  // Grid-stride loop: each thread handles elements start, start+step, ...
  // so any grid size covers all n elements.
  const int start = blockIdx.x * blockDim.x + threadIdx.x;
  const int step = blockDim.x * gridDim.x;
  for (int i = start; i < n; i += step) {
    y[i] += x[i];
  }
}
int main(void)
{
int N = 1<<20; // 1M elements
// Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// float *x = new float[N];
// float *y = new float[N];
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the CPU
//add(N, x, y);
// Run kernel on 1M elements on the GPU
//add<<<1, 1>>>(N, x, y);
//add<<<1, 256>>>(N, x, y);
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
// Free memory
// delete [] x;
// delete [] y;
//cudaProfilerStop();
return 0;
}
|
60c47e14c298593bd9bd130b48264eb7493e1d82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************************
* sample.cu
* This is a example of the CUDA program.
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
hipSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
#endif
/************************************************************************/
/* Example */
/************************************************************************/
__global__ static void HelloCUDA(char* result, int num)
{
int i = 0;
char p_HelloCUDA[] = "Hello CUDA!";
for(i = 0; i < num; i++) {
result[i] = p_HelloCUDA[i];
}
}
/************************************************************************/
/* HelloCUDA */
/************************************************************************/
int main(int argc, char* argv[])
{
if(!InitCUDA()) {
return 0;
}
char *device_result = 0;
char host_result[12] ={0};
cutilSafeCall( hipMalloc((void**) &device_result, sizeof(char) * 11));
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
hipLaunchKernelGGL(( HelloCUDA), dim3(1), dim3(1), 0, 0, device_result, 11);
cutilCheckMsg("Kernel execution failed\n");
hipDeviceSynchronize();
cutilCheckError( cutStopTimer( timer));
printf("Processing time: %f (ms)\n", cutGetTimerValue( timer));
cutilCheckError( cutDeleteTimer( timer));
cutilSafeCall( hipMemcpy(host_result, device_result, sizeof(char) * 11, hipMemcpyDeviceToHost));
printf("%s\n", host_result);
cutilSafeCall( hipFree(device_result));
return 0;
}
| 60c47e14c298593bd9bd130b48264eb7493e1d82.cu | /********************************************************************
* sample.cu
* This is a example of the CUDA program.
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
    // Select the first device reporting compute capability >= 1.x and make
    // it the current device. Returns false when no usable device exists.
    int count = 0;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int i = 0;
    for (; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess && prop.major >= 1) {
            break;
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA.\n");
        return false;
    }
    cudaSetDevice(i);
    printf("CUDA initialized.\n");
    return true;
}
#endif
/************************************************************************/
/* Example */
/************************************************************************/
__global__ static void HelloCUDA(char* result, int num)
{
    // Copies the first 'num' bytes of the greeting into 'result'.
    // The caller is responsible for making result at least 'num' bytes.
    const char msg[] = "Hello CUDA!";
    for (int i = 0; i < num; i++) {
        result[i] = msg[i];
    }
}
/************************************************************************/
/* HelloCUDA */
/************************************************************************/
int main(int argc, char* argv[])
{
if(!InitCUDA()) {
return 0;
}
char *device_result = 0;
char host_result[12] ={0};
cutilSafeCall( cudaMalloc((void**) &device_result, sizeof(char) * 11));
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
HelloCUDA<<<1, 1, 0>>>(device_result, 11);
cutilCheckMsg("Kernel execution failed\n");
cudaThreadSynchronize();
cutilCheckError( cutStopTimer( timer));
printf("Processing time: %f (ms)\n", cutGetTimerValue( timer));
cutilCheckError( cutDeleteTimer( timer));
cutilSafeCall( cudaMemcpy(host_result, device_result, sizeof(char) * 11, cudaMemcpyDeviceToHost));
printf("%s\n", host_result);
cutilSafeCall( cudaFree(device_result));
return 0;
}
|
eb7e641d35a16a956578f96794aadc86314621c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* A very simple CUDA example adding two arrays of ints together.
*
* Shows common paradigm of copy input data to device, copy results back to host
* Introduces many CUDA concepts
* - device pointers with hipMalloc, hipMemcpy
* - writing GPU kernel code (and getting threadIdx)
* - launching kernel
* - kernel launch dimensions, threads, blocks, grid
*
* Danny George 2012
*/
#include <stdio.h>
void do_the_add(int *a, int *b, int *r, int i);
const int N = 512 * 1024;
// initialize an array with a counting sequence
void fill_array_count(int *arr, const size_t n)
{
for (size_t i=0; i<n; ++i) {
arr[i] = (int)i;
}
}
// initialize an array with a constant number
void fill_array_const(int *arr, const size_t n, const int val)
{
for (size_t i=0; i<n; ++i) {
arr[i] = val;
}
}
// a CUDA kernel function
// the CUDA runtime spawns many parallel threads to execute it
// the executing thread id can be found through the threadIdx.[xyz] and blockIdx.[xyz] variables
// (this example doesn't spawn more than one block)
// the __global__ attribute tells the compiler that this is
// code that is called by the host and run on the device
__global__
void vector_add(int *a, int *b, int *r, const size_t n)
{
// Element-wise r[i] = a[i] + b[i] for i in [0, n).
// convert from 2D launch to 1D array index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// FIX: bound against the 'n' parameter. The original compared against the
// file-scope constant N, silently ignoring the argument, so the kernel was
// only correct for arrays of exactly N elements.
if ((size_t)tid >= n)
return;
r[tid] = a[tid] + b[tid];
// you can call __device__ functions from __global__ functions
//do_the_add(a, b, r, tid);
}
// __device__ tells the compiler this function is called by the device and runs on the device
// __host__ tells the compiler to make another version to run on the host (normal function)
__device__ __host__
void do_the_add(int *a, int *b, int *r, int i)
{
    // Store the element-wise sum of a[i] and b[i] into r[i].
    const int sum = a[i] + b[i];
    r[i] = sum;
}
int main(int argc, char const *argv[])
{
int *host_a;
int *host_b;
int *host_r;
int *dev_a;
int *dev_b;
int *dev_r;
// NOTE: this example does no error checking!
hipError_t err;
// ---- ALLOCATE MEMORY ON HOST -----------
host_a = (int *)malloc(sizeof(int) * N);
host_b = (int *)malloc(sizeof(int) * N);
host_r = (int *)malloc(sizeof(int) * N);
if (host_a == NULL || host_b == NULL || host_r == NULL) {
fprintf(stderr, "malloc error on host\n");
exit(1);
}
// ---- ALLOCATE MEMORY ON DEVICE ---------
// hipMalloc(void **dev_ptr, size_t count)
err = hipMalloc(&dev_a, sizeof(int) * N);
err = hipMalloc(&dev_b, sizeof(int) * N);
err = hipMalloc(&dev_r, sizeof(int) * N);
// ---- INITIALIZE DATA ON HOST -----------
fill_array_count(host_a, N);
fill_array_const(host_b, N, 10);
// ---- COPY DATA OVER TO DEVICE ----------
// hipMemcpy(void *dst, const void *src, size_t count, hipMemcpyKind kind)
err = hipMemcpy(dev_a, host_a, sizeof(int) * N, hipMemcpyHostToDevice);
err = hipMemcpy(dev_b, host_b, sizeof(int) * N, hipMemcpyHostToDevice);
// ---- PERFORM COMPUTATION ON DEVICE -----
int threads_per_block = 128;
int blocks_per_grid = ((N + threads_per_block - 1) / threads_per_block); // integer div, ensures at least 1 block
hipLaunchKernelGGL(( vector_add), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, dev_a, dev_b, dev_r, N);
// the <<<dim3 gridDim, dim3 blockDim>>> is a CUDA extension to launch kernels
// grids are made up of blocks
// blocks are made up of threads
// ---- COPY RESULT DATA BACK TO HOST ----
err = hipMemcpy(host_r, dev_r, sizeof(int) * N, hipMemcpyDeviceToHost);
// verify results
bool success = true;
for (size_t i=0; i<N; ++i) {
if (host_r[i] != host_a[i] + host_b[i]) {
fprintf(stderr, "ERROR [index %u]: %d != %d + %d", i, host_r[i], host_a[i], host_b[i]);
success = false;
break;
}
}
// ---- CLEANUP -------------------------
// free memory on host
free(host_a);
free(host_b);
free(host_r);
// free memory on device
err = hipFree(dev_a);
err = hipFree(dev_b);
err = hipFree(dev_r);
if (success)
printf("It worked!\n");
else
return 1;
return 0;
}
| eb7e641d35a16a956578f96794aadc86314621c2.cu | /*
* A very simple CUDA example adding two arrays of ints together.
*
* Shows common paradigm of copy input data to device, copy results back to host
* Introduces many CUDA concepts
* - device pointers with cudaMalloc, cudaMemcpy
* - writing GPU kernel code (and getting threadIdx)
* - launching kernel
* - kernel launch dimensions, threads, blocks, grid
*
* Danny George 2012
*/
#include <stdio.h>
void do_the_add(int *a, int *b, int *r, int i);
const int N = 512 * 1024;
// initialize an array with a counting sequence
void fill_array_count(int *arr, const size_t n)
{
    // Write the counting sequence 0, 1, ..., n-1 into arr.
    size_t k = 0;
    while (k < n) {
        arr[k] = (int)k;
        ++k;
    }
}
// initialize an array with a constant number
void fill_array_const(int *arr, const size_t n, const int val)
{
    // Set each of the first n elements of arr to val.
    for (size_t k = n; k > 0; --k) {
        arr[k - 1] = val;
    }
}
// a CUDA kernel function
// the CUDA runtime spawns many parallel threads to execute it
// the executing thread id can be found through the threadIdx.[xyz] and blockIdx.[xyz] variables
// (this example doesn't spawn more than one block)
// the __global__ attribute tells the compiler that this is
// code that is called by the host and run on the device
__global__
void vector_add(int *a, int *b, int *r, const size_t n)
{
// Element-wise r[i] = a[i] + b[i] for i in [0, n).
// convert from 2D launch to 1D array index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// FIX: bound against the 'n' parameter. The original compared against the
// file-scope constant N, silently ignoring the argument, so the kernel was
// only correct for arrays of exactly N elements.
if ((size_t)tid >= n)
return;
r[tid] = a[tid] + b[tid];
// you can call __device__ functions from __global__ functions
//do_the_add(a, b, r, tid);
}
// __device__ tells the compiler this function is called by the device and runs on the device
// __host__ tells the compiler to make another version to run on the host (normal function)
__device__ __host__
void do_the_add(int *a, int *b, int *r, int i)
{
r[i] = a[i] + b[i];
}
int main(int argc, char const *argv[])
{
    int *host_a;
    int *host_b;
    int *host_r;
    int *dev_a;
    int *dev_b;
    int *dev_r;
    // NOTE: this example does no error checking!
    cudaError_t err;
    // ---- ALLOCATE MEMORY ON HOST -----------
    host_a = (int *)malloc(sizeof(int) * N);
    host_b = (int *)malloc(sizeof(int) * N);
    host_r = (int *)malloc(sizeof(int) * N);
    if (host_a == NULL || host_b == NULL || host_r == NULL) {
        fprintf(stderr, "malloc error on host\n");
        exit(1);
    }
    // ---- ALLOCATE MEMORY ON DEVICE ---------
    // cudaMalloc(void **dev_ptr, size_t count)
    err = cudaMalloc(&dev_a, sizeof(int) * N);
    err = cudaMalloc(&dev_b, sizeof(int) * N);
    err = cudaMalloc(&dev_r, sizeof(int) * N);
    // ---- INITIALIZE DATA ON HOST -----------
    fill_array_count(host_a, N);
    fill_array_const(host_b, N, 10);
    // ---- COPY DATA OVER TO DEVICE ----------
    // cudaMemcpy(void *dst, const void *src, size_t count, cudaMemcpyKind kind)
    err = cudaMemcpy(dev_a, host_a, sizeof(int) * N, cudaMemcpyHostToDevice);
    err = cudaMemcpy(dev_b, host_b, sizeof(int) * N, cudaMemcpyHostToDevice);
    // ---- PERFORM COMPUTATION ON DEVICE -----
    int threads_per_block = 128;
    int blocks_per_grid = ((N + threads_per_block - 1) / threads_per_block); // integer div, ensures at least 1 block
    vector_add<<<blocks_per_grid, threads_per_block>>>(dev_a, dev_b, dev_r, N);
    // the <<<dim3 gridDim, dim3 blockDim>>> is a CUDA extension to launch kernels
    // grids are made up of blocks; blocks are made up of threads
    // ---- COPY RESULT DATA BACK TO HOST ----
    // (the blocking cudaMemcpy also synchronizes with the kernel above)
    err = cudaMemcpy(host_r, dev_r, sizeof(int) * N, cudaMemcpyDeviceToHost);
    // verify results
    bool success = true;
    for (size_t i = 0; i < N; ++i) {
        if (host_r[i] != host_a[i] + host_b[i]) {
            // FIX: 'i' is a size_t, so it must be printed with %zu; the
            // original "%u" is undefined behavior on LP64 platforms.
            fprintf(stderr, "ERROR [index %zu]: %d != %d + %d", i, host_r[i], host_a[i], host_b[i]);
            success = false;
            break;
        }
    }
    // ---- CLEANUP -------------------------
    // free memory on host
    free(host_a);
    free(host_b);
    free(host_r);
    // free memory on device
    err = cudaFree(dev_a);
    err = cudaFree(dev_b);
    err = cudaFree(dev_r);
    if (success)
        printf("It worked!\n");
    else
        return 1;
    return 0;
}
|
93668e58dbb26d4b42b716cd70414f9f60a896ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel2DXnp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *dataOutput = NULL;
hipMalloc(&dataOutput, XSIZE*YSIZE);
double *dataInput = NULL;
hipMalloc(&dataInput, XSIZE*YSIZE);
const double *weights = NULL;
hipMalloc(&weights, XSIZE*YSIZE);
const int numSten = 1;
const int numStenLeft = 1;
const int numStenRight = 1;
const int nxLocal = 1;
const int nyLocal = 1;
const int BLOCK_X = 1;
const int nx = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernel2DXnp), dim3(gridBlock),dim3(threadBlock), 0, 0, dataOutput,dataInput,weights,numSten,numStenLeft,numStenRight,nxLocal,nyLocal,BLOCK_X,nx);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel2DXnp), dim3(gridBlock),dim3(threadBlock), 0, 0, dataOutput,dataInput,weights,numSten,numStenLeft,numStenRight,nxLocal,nyLocal,BLOCK_X,nx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel2DXnp), dim3(gridBlock),dim3(threadBlock), 0, 0, dataOutput,dataInput,weights,numSten,numStenLeft,numStenRight,nxLocal,nyLocal,BLOCK_X,nx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 93668e58dbb26d4b42b716cd70414f9f60a896ee.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel2DXnp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    // Benchmarks kernel2DXnp over every (matrix size, block shape) pair and
    // prints '[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]' per configuration.
    cudaSetDevice(0);
    // FIX: guard argv[1]; the original dereferenced it without checking argc.
    if (argc < 2) {
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // FIX: allocate XSIZE*YSIZE doubles, not XSIZE*YSIZE bytes — the
            // original under-allocated by a factor of sizeof(double).
            double *dataOutput = NULL;
            cudaMalloc(&dataOutput, XSIZE * YSIZE * sizeof(double));
            double *dataInput = NULL;
            cudaMalloc(&dataInput, XSIZE * YSIZE * sizeof(double));
            const double *weights = NULL;
            cudaMalloc(&weights, XSIZE * YSIZE * sizeof(double));
            const int numSten = 1;
            const int numStenLeft = 1;
            const int numStenRight = 1;
            const int nxLocal = 1;
            const int nyLocal = 1;
            const int BLOCK_X = 1;
            const int nx = 1;
            // Round the launch extents up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op free: forces CUDA context creation before timing
            kernel2DXnp<<<gridBlock, threadBlock>>>(dataOutput, dataInput, weights, numSten, numStenLeft, numStenRight, nxLocal, nyLocal, BLOCK_X, nx);
            cudaDeviceSynchronize();
            // Warm-up launches, excluded from the timed region.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                kernel2DXnp<<<gridBlock, threadBlock>>>(dataOutput, dataInput, weights, numSten, numStenLeft, numStenRight, nxLocal, nyLocal, BLOCK_X, nx);
            }
            // NOTE(review): launches are asynchronous and there is no
            // synchronize before 'end', so this mostly times launch overhead;
            // kept as-is to preserve the benchmark's original measurement.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                kernel2DXnp<<<gridBlock, threadBlock>>>(dataOutput, dataInput, weights, numSten, numStenLeft, numStenRight, nxLocal, nyLocal, BLOCK_X, nx);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // FIX: release the per-configuration buffers; the original leaked
            // all three allocations on every one of the 140 iterations.
            cudaFree(dataOutput);
            cudaFree(dataInput);
            cudaFree((void *)weights);
        }
    }
}
6b58754252911015251cedf1820c20a9c8d2a9f7.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Oliveira in 3D (nostro, pensiamo di essere i primi)
#define BLOCK_X 4
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Walks up the union-find tree from node n and returns the index of its root.
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Caution: never call Find on a background pixel (its label is 0).
unsigned label = s_buf[n];
assert(label > 0);
// Labels are stored 1-based: s_buf[n] == n + 1 marks a root node.
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Merges the trees containing nodes a and b by linking their roots.
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
// Hang the larger root under the smaller one. atomicMin returns the
// previous value: if it was still b + 1, b was still a root and the
// link succeeded; otherwise another thread re-parented b concurrently,
// so retry from b's new parent.
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
// Same root: the two nodes are already merged.
done = true;
}
} while (!done);
}
// Gives every foreground voxel its own initial label — its 1-based flat
// index into the label volume — and sets background voxels to 0.
__global__ void Initialization(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
// img and labels can have different strides, so compute both flat indices.
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
// For every foreground voxel, merges its union-find tree with those of the
// already-visited neighbors of the 26-connectivity mask: the nine voxels of
// the previous z-plane, the three of the previous row, and the left voxel.
__global__ void Merge(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (labels[labels_index]) {
// Neighbors in the previous z-plane.
if (z > 0) {
unsigned current_plane = labels_index - (labels.stepz / labels.elem_size);
// Previous row of the previous plane.
if (y > 0) {
unsigned current_row = current_plane - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
// Same row of the previous plane.
{
unsigned current_row = current_plane;
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
// Next row of the previous plane.
if (y + 1 < labels.y) {
unsigned current_row = current_plane + (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
}
// Already-visited neighbors in the current plane.
{
// Previous row of the current plane.
if (y > 0) {
unsigned current_row = labels_index - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
// Left neighbor in the current row.
{
if (x > 0 && labels[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
}
}
// Flattens every foreground voxel's label so it points directly at its tree
// root (stored 1-based), producing the final connected-component labels.
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
// 3D Union-Find connected-components labeling (26-connectivity) on the GPU:
// initialization, merge of neighboring trees, then path compression.
class UF_3D : public GpuLabeling3D<CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_3D() {}
// Runs the complete three-phase labeling on d_img_ into d_img_labels_.
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
// Label the pixels locally to the block
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
// Link together the union-find trees of the different blocks
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
// Collapse the union-find trees onto their roots
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
//d_img_labels_.download(img_labels_);
//Mat errors;
//bool correct = CheckLabeledVolume(img_, img_labels_, errors);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\UF_errors", errors);
}
private:
// Timed allocation of the output label volume.
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
perf_.stop();
return perf_.last();
}
// Timed deallocation (empty: buffers are released elsewhere).
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
// Timed upload of the input volume to the device.
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
// Downloads the computed labels back to the host.
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
// Phase 1 only (initial per-voxel labels), used by the stepwise benchmark.
void LocalScan() {
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
// Phases 2 and 3 (merge + path compression), used by the stepwise benchmark.
void GlobalScan() {
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
}
public:
// Same labeling as PerformLabeling, but with per-step performance records.
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_3D);
| 6b58754252911015251cedf1820c20a9c8d2a9f7.cu | #include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Oliveira in 3D (nostro, pensiamo di essere i primi)
#define BLOCK_X 4
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Risale alla radice dell'albero a partire da un suo nodo n
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Attenzione: non invocare la find su un pixel di background
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Unisce gli alberi contenenti i nodi a e b, collegandone le radici
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (labels[labels_index]) {
if (z > 0) {
unsigned current_plane = labels_index - (labels.stepz / labels.elem_size);
if (y > 0) {
unsigned current_row = current_plane - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
unsigned current_row = current_plane;
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
if (y + 1 < labels.y) {
unsigned current_row = current_plane + (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
}
{
if (y > 0) {
unsigned current_row = labels_index - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
if (x > 0 && labels[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_3D : public GpuLabeling3D<CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
// Etichetta i pixel localmente al blocco
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
// Immagine di debug della prima fase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
// Collega tra loro gli alberi union-find dei diversi blocchi
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
// Immagine di debug della seconda fase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
// Collassa gli alberi union-find sulle radici
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
//d_img_labels_.download(img_labels_);
//Mat errors;
//bool correct = CheckLabeledVolume(img_, img_labels_, errors);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\UF_errors", errors);
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
void GlobalScan() {
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_3D);
|
82d4cf5645b9ee61134c2a656f901b6ed001a76f.hip | // !!! This is a file automatically generated by hipify!!!
#include <UnitTest++.h>
#include <tuple>
#include <algorithm>
#ifdef __HIPCC__
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>
#endif
#include "GPUAtomicAdd.hh"
#include "SimpleVector.hh"
SUITE( GPUAtomicAdd_simple_tests ) {
class testing{ // because UnitTest++ doesn't play well with GPU lambdas
public:
MonteRay::SimpleVector<double> cVec;
MonteRay::SimpleVector<std::tuple<double, double>> ab;
testing(){
ab = MonteRay::SimpleVector<std::tuple<double, double>>(1024, {1, 2});
cVec = MonteRay::SimpleVector<double>(1, 0.0); // one value that's alloc'd via malloc managed
}
};
TEST_FIXTURE(testing, GPUAtomicAdd){
auto cData = cVec.data();
auto func = [ cData ] CUDA_CALLABLE_MEMBER (const std::tuple<double, double>& val) {
MonteRay::gpu_atomicAdd(cData, std::get<0>(val) + std::get<1>(val));
};
#ifdef __HIPCC__
thrust::for_each(thrust::device, ab.begin(), ab.end(), func);
hipDeviceSynchronize();
auto& c = cVec[0];
CHECK_EQUAL(3*1024, c);
c = 0;
hipDeviceSynchronize();
thrust::for_each(thrust::host, ab.begin(), ab.end(), func);
CHECK_EQUAL(3*1024, c);
hipDeviceSynchronize();
#else
std::for_each(ab.begin(), ab.end(), func);
auto val = static_cast<double>(3*1024);
CHECK_EQUAL(c, val);
#endif
}
}
| 82d4cf5645b9ee61134c2a656f901b6ed001a76f.cu | #include <UnitTest++.h>
#include <tuple>
#include <algorithm>
#ifdef __CUDACC__
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>
#endif
#include "GPUAtomicAdd.hh"
#include "SimpleVector.hh"
SUITE( GPUAtomicAdd_simple_tests ) {
class testing{ // because UnitTest++ doesn't play well with GPU lambdas
public:
MonteRay::SimpleVector<double> cVec;
MonteRay::SimpleVector<std::tuple<double, double>> ab;
testing(){
ab = MonteRay::SimpleVector<std::tuple<double, double>>(1024, {1, 2});
cVec = MonteRay::SimpleVector<double>(1, 0.0); // one value that's alloc'd via malloc managed
}
};
TEST_FIXTURE(testing, GPUAtomicAdd){
auto cData = cVec.data();
auto func = [ cData ] CUDA_CALLABLE_MEMBER (const std::tuple<double, double>& val) {
MonteRay::gpu_atomicAdd(cData, std::get<0>(val) + std::get<1>(val));
};
#ifdef __CUDACC__
thrust::for_each(thrust::device, ab.begin(), ab.end(), func);
cudaDeviceSynchronize();
auto& c = cVec[0];
CHECK_EQUAL(3*1024, c);
c = 0;
cudaDeviceSynchronize();
thrust::for_each(thrust::host, ab.begin(), ab.end(), func);
CHECK_EQUAL(3*1024, c);
cudaDeviceSynchronize();
#else
std::for_each(ab.begin(), ab.end(), func);
auto val = static_cast<double>(3*1024);
CHECK_EQUAL(c, val);
#endif
}
}
|
f0ed8172325235f10ac85003373bdfd468fc7e88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/layer_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/layer_norm_impl.cu.h"
#include "paddle/phi/kernels/funcs/layer_norm_util.h"
#include "paddle/utils/flags.h"
PD_DECLARE_bool(use_fast_math);
namespace phi {
#ifdef PADDLE_WITH_CUDA
template <typename U>
__device__ inline void WelfordOnline(U val, U *mean, U *square, U *count) {
*count += 1;
U delta1 = val - *mean;
*mean += delta1 / (*count);
U delta2 = val - *mean;
*square += delta1 * delta2;
}
template <typename U>
__device__ inline void WelfordOnline(
U b_mean, U b_square, U b_cnt, U *mean, U *square, U *count) {
if (b_cnt == 0) {
return;
}
U new_cnt = *count + b_cnt;
U nb_n = b_cnt / new_cnt;
U delta = b_mean - *mean;
*mean += delta * nb_n;
*square += b_square + delta * delta * (*count) * nb_n;
*count = new_cnt;
}
template <typename U>
__device__ inline void WelfordWarpAllReduce(U *mean, U *square, U *count) {
constexpr int kWarpSize = 32;
#pragma unroll
for (int mask = 1; mask < kWarpSize; mask *= 2) {
U b_mean = __shfl_down_sync(0xffffffff, *mean, mask);
U b_square = __shfl_down_sync(0xffffffff, *square, mask);
U b_cnt = __shfl_down_sync(0xffffffff, *count, mask);
WelfordOnline<U>(b_mean, b_square, b_cnt, mean, square, count);
}
*mean = __shfl_sync(0xffffffff, *mean, 0, kWarpSize);
*square = __shfl_sync(0xffffffff, *square, 0, kWarpSize);
*count = __shfl_sync(0xffffffff, *count, 0, kWarpSize);
}
template <int VecSize>
struct ThreadAssigner {
__device__ __forceinline__ int operator()(const int cols,
const int cols_per_thread,
int32_t *last_tid_idx) {
return cols_per_thread;
}
};
template <>
struct ThreadAssigner<1> {
__device__ inline int operator()(const int cols,
const int cols_per_thread,
int *last_tid_idx) {
int cols_this_thread = cols_per_thread;
int last_tid = (cols / cols_per_thread);
*last_tid_idx = last_tid;
if (threadIdx.x == last_tid) {
cols_this_thread = cols - cols_per_thread * last_tid;
} else if (threadIdx.x > last_tid) {
cols_this_thread = 0;
}
return cols_this_thread;
}
};
template <typename T, typename U, int VecSize>
struct LayerNormDataReader {
__device__ inline void operator()(const T *__restrict__ row_src,
U *buffer,
const int last_tid_idx,
const int read_times,
const int cols_this_thread) {
using VecT = phi::AlignedVector<T, VecSize>;
const VecT *__restrict__ v_src =
reinterpret_cast<const VecT *__restrict__>(row_src);
for (int i = 0; i < read_times; ++i) {
VecT temp_src = v_src[threadIdx.x + i * blockDim.x];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
buffer[i * VecSize + j] = static_cast<U>(temp_src[j]);
}
}
}
};
template <typename T, typename U>
struct LayerNormDataReader<T, U, 1> {
__device__ inline void operator()(const T *__restrict__ row_src,
U *buffer,
const int last_tid_idx,
const int read_times,
const int cols_this_thread) {
// read_time is just cols_per_thread while VecSize is 1.
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
buffer[i] = static_cast<U>(row_src[threadIdx.x + last_tid_idx * i]);
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
buffer[i] = static_cast<U>(row_src[i + read_times * last_tid_idx]);
}
}
}
};
template <typename T, typename U, bool IsSameType, int VecSize>
struct LayerNormDataWritter {
__device__ inline void operator()(
T *__restrict__ row_dst,
const U *__restrict__ buffer,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
const U row_mean,
const U row_inv_var,
const int write_times,
const int cols_this_thread,
const int last_tid_idx,
const bool valid_scale,
const bool valid_bias) {
using VecT = phi::AlignedVector<T, VecSize>;
using ScaleT = funcs::LayerNormScaleBiasT<T, U, IsSameType>;
using VecScaleT = phi::AlignedVector<ScaleT, VecSize>;
VecT *v_dst = reinterpret_cast<VecT *>(row_dst);
// cols_this_thread is just cols_per_thread
if ((!valid_scale) && (!valid_bias)) {
for (int i = 0; i < write_times; ++i) {
VecT temp_dst;
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>((buffer[i * VecSize + j] - row_mean) *
row_inv_var);
}
v_dst[threadIdx.x + blockDim.x * i] = temp_dst;
}
} else {
const VecScaleT *__restrict__ v_scale =
reinterpret_cast<const VecScaleT *__restrict__>(scale);
const VecScaleT *__restrict__ v_bias =
reinterpret_cast<const VecScaleT *__restrict__>(bias);
if (valid_scale && valid_bias) {
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_scale = v_scale[idx];
VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
static_cast<U>(temp_v_scale[j]) *
(buffer[i * VecSize + j] - row_mean) * row_inv_var +
static_cast<U>(temp_v_bias[j]));
}
v_dst[idx] = temp_dst;
}
} else {
if (valid_scale) {
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_scale = v_scale[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
static_cast<U>(temp_v_scale[j]) *
(buffer[i * VecSize + j] - row_mean) * row_inv_var);
}
v_dst[idx] = temp_dst;
}
} else {
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
(buffer[i * VecSize + j] - row_mean) * row_inv_var +
static_cast<U>(temp_v_bias[j]));
}
v_dst[idx] = temp_dst;
}
}
}
}
}
};
template <typename T, typename U, bool IsSameType>
struct LayerNormDataWritter<T, U, IsSameType, 1> {
__device__ __forceinline__ void operator()(
T *__restrict__ row_dst,
U *__restrict__ buffer,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
const U row_mean,
const U row_inv_var,
const int write_times,
const int cols_this_thread,
const int last_tid_idx,
const bool valid_scale,
const bool valid_bias) {
// write_times is just col_per_thread.
if ((!valid_scale) && (!valid_bias)) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
row_dst[threadIdx.x + last_tid_idx * i] =
(buffer[i] - row_mean) * row_inv_var;
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
row_dst[last_tid_idx * write_times + i] =
(buffer[i] - row_mean) * row_inv_var;
}
}
} else if (valid_scale && valid_bias) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] =
static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] =
static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
}
} else {
if (valid_scale) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var);
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var);
}
}
} else {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
}
}
}
}
};
template <typename IndexT, typename T, typename U, bool IsSameType, int VecSize>
__global__ void LayerNormFwdWithWelford(
const T *__restrict__ src_data,
T *dst_data,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
U *mean,
U *var,
const U epsilon,
const IndexT rows,
const int32_t cols,
const int32_t cols_per_thread,
const bool valid_scale,
const bool valid_bias) {
constexpr int kWarpSize = 32;
int last_tid_idx = 0; // For condition once vecSize is 1.
IndexT row_offset = blockIdx.x * blockDim.y + threadIdx.y;
int cols_this_thread =
ThreadAssigner<VecSize>()(cols, cols_per_thread, &last_tid_idx);
int read_times = cols_per_thread / VecSize;
if (row_offset < rows) {
U buffer[kWarpSize];
U tid_cnt = static_cast<U>(0);
U tid_mean = static_cast<U>(0);
U tid_square = static_cast<U>(0);
const T *__restrict__ row_src = src_data + row_offset * cols;
T *row_dst = dst_data + row_offset * cols;
LayerNormDataReader<T, U, VecSize>()(
row_src, buffer, last_tid_idx, read_times, cols_this_thread);
for (int i = 0; i < cols_this_thread; i++) {
WelfordOnline<U>(buffer[i], &tid_mean, &tid_square, &tid_cnt);
}
U warp_cnt = tid_cnt;
U warp_mean = tid_mean;
U warp_square = tid_square;
WelfordWarpAllReduce<U>(&warp_mean, &warp_square, &warp_cnt);
U row_variance = max(warp_square / warp_cnt, 0.f);
U row_inv_var = funcs::rsqrt_(row_variance + epsilon);
// TODO(limingshu): make code below vectorization.
if (threadIdx.x == 0) {
// warp_mean is just row_mean here.
mean[row_offset] = warp_mean;
var[row_offset] = row_variance;
}
LayerNormDataWritter<T, U, IsSameType, VecSize>()(row_dst,
buffer,
scale,
bias,
warp_mean,
row_inv_var,
read_times,
cols_this_thread,
last_tid_idx,
valid_scale,
valid_bias);
}
}
template <typename Context, typename T, typename U>
void LaunchLayerNormKernel(const Context &dev_ctx,
const T *x_data,
T *y_data,
const void *void_scale_data,
const void *void_bias_data,
U *mean_data,
U *var_data,
float epsilon,
const int64_t rows,
const int cols,
const bool valid_scale,
const bool valid_bias,
const bool is_same_type) {
constexpr int WarpSize = 32;
constexpr int RowPerBlock = 4;
int64_t block_size = (rows + (RowPerBlock - 1)) / RowPerBlock;
dim3 threads(WarpSize, RowPerBlock, 1);
int vec_size = 1;
int cols_per_thread = (cols + (WarpSize - 1)) / WarpSize;
if (cols_per_thread > 1 && (cols % WarpSize == 0)) {
int data_vec_size = 0;
uint64_t addr = (reinterpret_cast<uint64_t>(x_data) |
reinterpret_cast<uint64_t>(y_data));
if (valid_bias || valid_scale) {
if (is_same_type) {
addr = valid_scale
? (addr | reinterpret_cast<uint64_t>(void_scale_data))
: addr;
addr = valid_bias ? (addr | reinterpret_cast<uint64_t>(void_bias_data))
: addr;
data_vec_size = phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr));
} else {
uint64_t bias_addr = reinterpret_cast<uint64_t>(void_bias_data);
uint64_t attr_addr = valid_scale
? reinterpret_cast<uint64_t>(void_scale_data)
: bias_addr;
attr_addr = valid_bias
? (valid_scale ? (attr_addr | bias_addr) : attr_addr)
: attr_addr;
data_vec_size = ::min(
phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr)),
phi::GetVectorizedSize<U>(reinterpret_cast<U *>(attr_addr)));
}
}
for (int size = data_vec_size; size > 0; size /= 2) {
if (cols_per_thread % size == 0) {
vec_size = size;
break;
}
}
}
#define IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, vec_size_) \
case (vec_size_): { \
hipLaunchKernelGGL(( LayerNormFwdWithWelford<index_t, T, U, is_same_, vec_size_>) \
, dim3(block_size), dim3(threads), 0, dev_ctx.stream(), \
x_data, \
y_data, \
static_cast<const scale_t *>(void_scale_data), \
static_cast<const scale_t *>(void_bias_data), \
mean_data, \
var_data, \
static_cast<const U>(epsilon), \
rows, \
cols, \
cols_per_thread, \
valid_scale, \
valid_bias); \
} break
#define IMPL_LAYER_NORM_WELFORD(index_t, scale_t, is_same_) \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 4); \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 2); \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 1);
if (rows < std::numeric_limits<int32_t>::max()) {
if (is_same_type) {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, T, true); }
} else {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, U, false); }
}
} else {
if (is_same_type) {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, T, true); }
} else {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, U, false); }
}
}
#undef IMPL_LAYER_NORM_WELFORD_CASE
#undef IMPL_LAYER_NORM_WELFORD
}
#endif // PADDLE_WITH_CUDA
template <typename T, typename U>
void LayerNormDirectCUDAFunctor<T, U>::operator()(gpuStream_t stream,
const T *input,
std::vector<int> input_shape,
const U *bias,
const U *scale,
T *output,
U *mean,
U *variance,
int begin_norm_axis,
float eps) {
const auto x_dims = phi::make_ddim(input_shape);
auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
switch (phi::funcs::GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( phi::funcs::LayerNormForward<T, U, kBlockDim>)
, dim3(batch_size), dim3(kBlockDim), 0, stream,
input, scale, bias, output, mean, variance, eps, feature_size));
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Product from begin_norm_axis to end in layer_norm must be larger "
"than 1"));
break;
}
}
template class LayerNormDirectCUDAFunctor<float, float>;
template class LayerNormDirectCUDAFunctor<double, double>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
template class LayerNormDirectCUDAFunctor<half, float>;
#endif
template <typename T, typename Context>
void LayerNormKernel(const Context &dev_ctx,
const DenseTensor &x,
const paddle::optional<DenseTensor> &scale_opt,
const paddle::optional<DenseTensor> &bias_opt,
float epsilon,
int begin_norm_axis,
DenseTensor *y,
DenseTensor *mean,
DenseTensor *var) {
using U = phi::funcs::LayerNormParamType<T>;
auto *scale = scale_opt.get_ptr();
auto *bias = bias_opt.get_ptr();
const auto x_dims = x.dims();
auto *x_data = x.data<T>();
auto *y_data = dev_ctx.template Alloc<T>(y);
auto *mean_data = dev_ctx.template Alloc<U>(mean);
auto *var_data = dev_ctx.template Alloc<U>(var);
bool valid_scale = (scale != nullptr);
bool valid_bias = (bias != nullptr);
auto *void_scale_data = valid_scale ? scale->data() : nullptr;
auto *void_bias_data = valid_bias ? bias->data() : nullptr;
auto x_dtype = x.dtype();
phi::DataType scale_bias_dtype;
if (valid_scale) {
scale_bias_dtype = scale->dtype();
if (valid_bias) {
PADDLE_ENFORCE_EQ(
scale->dtype(),
bias->dtype(),
phi::errors::InvalidArgument("This Scale and Bias of layer_norm op "
"should have the same data type."));
}
} else {
scale_bias_dtype = valid_bias ? bias->dtype() : x_dtype;
}
bool is_scale_bias_same_dtype_with_x = x_dtype == scale_bias_dtype;
if (!is_scale_bias_same_dtype_with_x) {
PADDLE_ENFORCE_EQ(scale_bias_dtype,
phi::CppTypeToDataType<U>::Type(),
phi::errors::InvalidArgument(
"Unsupported data type of Scale and Bias"));
}
auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
auto stream = dev_ctx.stream();
#define PADDLE_LAUNCH_LAYERNORM_FWD(ScaleBiasT, IsScaleBiasSameDTypeWithX) \
do { \
switch (phi::funcs::GetDesiredBlockDim(feature_size)) { \
FIXED_BLOCK_DIM_CASE( \
phi::funcs:: \
hipLaunchKernelGGL(( LayerNormForward<T, U, kBlockDim, IsScaleBiasSameDTypeWithX>) \
, dim3(batch_size), dim3(kBlockDim), 0, stream, \
x_data, \
static_cast<const ScaleBiasT *>(void_scale_data), \
static_cast<const ScaleBiasT *>(void_bias_data), \
y_data, \
mean_data, \
var_data, \
epsilon, \
feature_size)); \
default: \
PADDLE_THROW(phi::errors::InvalidArgument( \
"Product from begin_norm_axis to end must be larger than 1")); \
break; \
} \
} while (0)
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, feature_size) \
case (feature_size): { \
constexpr int WARPS_N = feature_size < 1024 ? 1 : (feature_size / 1024); \
constexpr int WARPS_M = 4 / WARPS_N; \
const int THREADS_PER_WARP = 32; \
const int BYTES_PER_LDG = 16; \
const int VecSize = BYTES_PER_LDG / sizeof(T); \
const int THREADS_PER_CTA = WARPS_N * THREADS_PER_WARP * WARPS_M; \
const int ROWS_PER_CTA = WARPS_M; \
const int grid = static_cast<int>( \
::ceil(batch_size / static_cast<float>(ROWS_PER_CTA))); \
hipLaunchKernelGGL(( phi::funcs::fast_ln_fwd_kernel<T, \
U, \
ScaleT, \
VecSize, \
WARPS_M, \
WARPS_N, \
BYTES_PER_LDG, \
feature_size>) \
, dim3(grid), dim3(THREADS_PER_CTA), 0, stream, \
batch_size, \
feature_size, \
epsilon, \
x_data, \
static_cast<const ScaleT *>(void_scale_data), \
static_cast<const ScaleT *>(void_bias_data), \
mean_data, \
var_data, \
y_data); \
} break
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD(ScaleT) \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 768); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1024); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1280); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1536); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1792); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 2048); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 4096)
#ifdef PADDLE_WITH_CUDA
bool can_call_fast_kernel = false;
if ((feature_size >= 768 && feature_size <= 2048 && feature_size % 256 == 0 ||
feature_size == 4096) &&
scale != nullptr && bias != nullptr) {
can_call_fast_kernel = true;
}
if (can_call_fast_kernel) {
if (is_scale_bias_same_dtype_with_x) {
switch (feature_size) {
PADDLE_LAUNCH_FAST_LAYERNORM_FWD(T);
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Only when feature_size is from 256 to 4096 and is diviaible by "
"256 is supported "
"now"));
break;
}
} else {
switch (feature_size) {
PADDLE_LAUNCH_FAST_LAYERNORM_FWD(U);
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Only when feature_size is from 256 to 4096 and is diviaible by "
"is supported "
"now"));
break;
}
}
} else {
// WarpShuffle intrinsics is involved in LaunchLayerNormKernel.
if (FLAGS_use_fast_math && feature_size <= 1024 &&
(!std::is_same<T, int8_t>::value)) {
LaunchLayerNormKernel<Context, T, U>(dev_ctx,
x_data,
y_data,
void_scale_data,
void_bias_data,
mean_data,
var_data,
epsilon,
batch_size,
feature_size,
valid_scale,
valid_bias,
is_scale_bias_same_dtype_with_x);
} else {
#endif
if (is_scale_bias_same_dtype_with_x) {
PADDLE_LAUNCH_LAYERNORM_FWD(T, true);
} else {
PADDLE_LAUNCH_LAYERNORM_FWD(U, false);
}
#ifdef PADDLE_WITH_CUDA
}
}
#endif
#undef PADDLE_LAUNCH_LAYERNORM_FWD
#undef PADDLE_LAUNCH_FAST_LAYERNORM_FWD
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
// MIOPEN do not support double
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
phi::dtype::float16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#elif CUDNN_VERSION_MIN(8, 1, 0)
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#else
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
double,
phi::dtype::float16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#endif
| f0ed8172325235f10ac85003373bdfd468fc7e88.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/layer_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/layer_norm_impl.cu.h"
#include "paddle/phi/kernels/funcs/layer_norm_util.h"
#include "paddle/utils/flags.h"
PD_DECLARE_bool(use_fast_math);
namespace phi {
#ifdef PADDLE_WITH_CUDA
template <typename U>
__device__ inline void WelfordOnline(U val, U *mean, U *square, U *count) {
*count += 1;
U delta1 = val - *mean;
*mean += delta1 / (*count);
U delta2 = val - *mean;
*square += delta1 * delta2;
}
// Pairwise merge of two Welford statistic triples: folds a partial
// (b_mean, b_square = M2_b, b_cnt) into the accumulator (*mean, *square,
// *count).  This is the standard parallel-variance merge:
//   delta = mean_b - mean_a
//   M2    = M2_a + M2_b + delta^2 * n_a * n_b / (n_a + n_b)
template <typename U>
__device__ inline void WelfordOnline(
U b_mean, U b_square, U b_cnt, U *mean, U *square, U *count) {
// An empty partial contributes nothing (and would divide by zero below
// when the accumulator is also empty).
if (b_cnt == 0) {
return;
}
U new_cnt = *count + b_cnt;
U nb_n = b_cnt / new_cnt;
U delta = b_mean - *mean;
*mean += delta * nb_n;
// delta * delta * (*count) * nb_n == delta^2 * n_a * n_b / (n_a + n_b).
*square += b_square + delta * delta * (*count) * nb_n;
*count = new_cnt;
}
// Warp-wide all-reduce of Welford statistics.  Every lane enters with its
// own partial (mean, M2, count) and leaves with the warp-wide result.
// A shfl-down tree (offsets 1, 2, 4, 8, 16) accumulates everything into
// lane 0, then the final __shfl_sync broadcasts lane 0's triple to all
// lanes.  The 0xffffffff masks require all 32 lanes to participate, which
// holds because the launcher uses blockDim.x == 32 (one warp per row) and
// the caller's branch is uniform per warp.
template <typename U>
__device__ inline void WelfordWarpAllReduce(U *mean, U *square, U *count) {
constexpr int kWarpSize = 32;
#pragma unroll
for (int mask = 1; mask < kWarpSize; mask *= 2) {
U b_mean = __shfl_down_sync(0xffffffff, *mean, mask);
U b_square = __shfl_down_sync(0xffffffff, *square, mask);
U b_cnt = __shfl_down_sync(0xffffffff, *count, mask);
// Merge the neighbor's partial into this lane's accumulator.
WelfordOnline<U>(b_mean, b_square, b_cnt, mean, square, count);
}
// Broadcast lane 0's fully reduced statistics to the whole warp.
*mean = __shfl_sync(0xffffffff, *mean, 0, kWarpSize);
*square = __shfl_sync(0xffffffff, *square, 0, kWarpSize);
*count = __shfl_sync(0xffffffff, *count, 0, kWarpSize);
}
// Decides how many columns this thread handles.  Primary template
// (VecSize > 1): the launcher only picks a vectorized path when
// cols % 32 == 0 (see LaunchLayerNormKernel), so every lane gets exactly
// cols_per_thread columns and last_tid_idx is left untouched.
template <int VecSize>
struct ThreadAssigner {
__device__ __forceinline__ int operator()(const int cols,
const int cols_per_thread,
int32_t *last_tid_idx) {
return cols_per_thread;
}
};
// VecSize == 1 specialization: cols need not divide evenly by the warp.
// Lanes below last_tid (= cols / cols_per_thread) take a full
// cols_per_thread slice, the lane at last_tid takes the remainder, and
// lanes beyond it get no columns.  *last_tid_idx reports the boundary
// lane to the reader/writer helpers.
template <>
struct ThreadAssigner<1> {
__device__ inline int operator()(const int cols,
const int cols_per_thread,
int *last_tid_idx) {
int cols_this_thread = cols_per_thread;
int last_tid = (cols / cols_per_thread);
*last_tid_idx = last_tid;
if (threadIdx.x == last_tid) {
// Boundary lane: whatever is left over after the full slices.
cols_this_thread = cols - cols_per_thread * last_tid;
} else if (threadIdx.x > last_tid) {
cols_this_thread = 0;
}
return cols_this_thread;
}
};
// Loads this lane's slice of one input row into registers (`buffer`),
// converting T -> U.  Primary template: vectorized loads of VecSize
// elements; iteration i reads vector (threadIdx.x + i * blockDim.x), so
// consecutive lanes touch consecutive vectors (coalesced access).
template <typename T, typename U, int VecSize>
struct LayerNormDataReader {
__device__ inline void operator()(const T *__restrict__ row_src,
U *buffer,
const int last_tid_idx,
const int read_times,
const int cols_this_thread) {
using VecT = phi::AlignedVector<T, VecSize>;
const VecT *__restrict__ v_src =
reinterpret_cast<const VecT *__restrict__>(row_src);
for (int i = 0; i < read_times; ++i) {
VecT temp_src = v_src[threadIdx.x + i * blockDim.x];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
buffer[i * VecSize + j] = static_cast<U>(temp_src[j]);
}
}
}
};
// VecSize == 1 specialization: scalar loads with the ragged column split
// produced by ThreadAssigner<1>.  Lanes below last_tid_idx read the
// interleaved pattern row_src[threadIdx.x + last_tid_idx * i] (stride
// last_tid_idx between a lane's successive elements); the boundary lane
// reads the final contiguous chunk starting at read_times * last_tid_idx.
// The writer specialization mirrors this layout exactly.
template <typename T, typename U>
struct LayerNormDataReader<T, U, 1> {
__device__ inline void operator()(const T *__restrict__ row_src,
U *buffer,
const int last_tid_idx,
const int read_times,
const int cols_this_thread) {
// read_time is just cols_per_thread while VecSize is 1.
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
buffer[i] = static_cast<U>(row_src[threadIdx.x + last_tid_idx * i]);
}
} else {
// Boundary lane: trailing remainder, contiguous.
for (int i = 0; i < cols_this_thread; ++i) {
buffer[i] = static_cast<U>(row_src[i + read_times * last_tid_idx]);
}
}
}
};
// Writes this lane's normalized slice back to the output row:
//   y = scale * (x - mean) * inv_std + bias
// with scale and bias each optional.  Primary template: vectorized stores
// mirroring LayerNormDataReader's (threadIdx.x + i * blockDim.x) layout.
// The four {scale, bias} presence combinations are expanded into separate
// loops so the inner per-element loop stays branch-free.
template <typename T, typename U, bool IsSameType, int VecSize>
struct LayerNormDataWritter {
__device__ inline void operator()(
T *__restrict__ row_dst,
const U *__restrict__ buffer,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
const U row_mean,
const U row_inv_var,
const int write_times,
const int cols_this_thread,
const int last_tid_idx,
const bool valid_scale,
const bool valid_bias) {
using VecT = phi::AlignedVector<T, VecSize>;
using ScaleT = funcs::LayerNormScaleBiasT<T, U, IsSameType>;
using VecScaleT = phi::AlignedVector<ScaleT, VecSize>;
VecT *v_dst = reinterpret_cast<VecT *>(row_dst);
// cols_this_thread is just cols_per_thread
// Case 1: neither scale nor bias — plain normalization.
if ((!valid_scale) && (!valid_bias)) {
for (int i = 0; i < write_times; ++i) {
VecT temp_dst;
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>((buffer[i * VecSize + j] - row_mean) *
row_inv_var);
}
v_dst[threadIdx.x + blockDim.x * i] = temp_dst;
}
} else {
const VecScaleT *__restrict__ v_scale =
reinterpret_cast<const VecScaleT *__restrict__>(scale);
const VecScaleT *__restrict__ v_bias =
reinterpret_cast<const VecScaleT *__restrict__>(bias);
// Case 2: both scale and bias.
if (valid_scale && valid_bias) {
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_scale = v_scale[idx];
VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
static_cast<U>(temp_v_scale[j]) *
(buffer[i * VecSize + j] - row_mean) * row_inv_var +
static_cast<U>(temp_v_bias[j]));
}
v_dst[idx] = temp_dst;
}
} else {
// Case 3: scale only.
if (valid_scale) {
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_scale = v_scale[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
static_cast<U>(temp_v_scale[j]) *
(buffer[i * VecSize + j] - row_mean) * row_inv_var);
}
v_dst[idx] = temp_dst;
}
} else {
// Case 4: bias only.
for (int i = 0; i < write_times; ++i) {
int idx = threadIdx.x + blockDim.x * i;
VecT temp_dst;
VecScaleT temp_v_bias = v_bias[idx];
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
temp_dst[j] = static_cast<T>(
(buffer[i * VecSize + j] - row_mean) * row_inv_var +
static_cast<U>(temp_v_bias[j]));
}
v_dst[idx] = temp_dst;
}
}
}
}
}
};
// VecSize == 1 specialization of the writer: scalar stores using the same
// ragged split as LayerNormDataReader<T, U, 1> — lanes below last_tid_idx
// write the interleaved pattern (threadIdx.x + last_tid_idx * i), the
// boundary lane writes the trailing contiguous chunk.  Branches expand the
// {scale, bias} presence combinations.
// NOTE(review): the no-scale/no-bias path stores without an explicit
// static_cast<T>, relying on implicit U -> T conversion — confirm this is
// intended for narrow T (e.g. float16).
template <typename T, typename U, bool IsSameType>
struct LayerNormDataWritter<T, U, IsSameType, 1> {
__device__ __forceinline__ void operator()(
T *__restrict__ row_dst,
U *__restrict__ buffer,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
const U row_mean,
const U row_inv_var,
const int write_times,
const int cols_this_thread,
const int last_tid_idx,
const bool valid_scale,
const bool valid_bias) {
// write_times is just col_per_thread.
if ((!valid_scale) && (!valid_bias)) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
row_dst[threadIdx.x + last_tid_idx * i] =
(buffer[i] - row_mean) * row_inv_var;
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
row_dst[last_tid_idx * write_times + i] =
(buffer[i] - row_mean) * row_inv_var;
}
}
} else if (valid_scale && valid_bias) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] =
static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] =
static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
}
} else {
// Exactly one of scale/bias present.
if (valid_scale) {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var);
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] = static_cast<T>(static_cast<U>(scale[idx]) *
(buffer[i] - row_mean) * row_inv_var);
}
}
} else {
if (threadIdx.x < last_tid_idx) {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = threadIdx.x + last_tid_idx * i;
row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
} else {
for (int i = 0; i < cols_this_thread; ++i) {
int idx = last_tid_idx * write_times + i;
row_dst[idx] = static_cast<T>((buffer[i] - row_mean) * row_inv_var +
static_cast<U>(bias[idx]));
}
}
}
}
}
};
// One-warp-per-row layer-norm forward using Welford statistics.
// Launch shape (see LaunchLayerNormKernel): block = (32, RowPerBlock),
// grid.x = ceil(rows / RowPerBlock); every warp owns one row, so the
// `row_offset < rows` branch is uniform across the warp and the full-mask
// warp reduction inside it is safe.  Each lane stages its slice in
// registers (buffer[32] — cols_per_thread <= 32 because the caller only
// uses this path for feature_size <= 1024), warp-reduces mean/variance,
// lane 0 writes the per-row mean/var, then every lane writes its
// normalized slice.
template <typename IndexT, typename T, typename U, bool IsSameType, int VecSize>
__global__ void LayerNormFwdWithWelford(
const T *__restrict__ src_data,
T *dst_data,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ scale,
const funcs::LayerNormScaleBiasT<T, U, IsSameType> *__restrict__ bias,
U *mean,
U *var,
const U epsilon,
const IndexT rows,
const int32_t cols,
const int32_t cols_per_thread,
const bool valid_scale,
const bool valid_bias) {
constexpr int kWarpSize = 32;
int last_tid_idx = 0; // For condition once vecSize is 1.
IndexT row_offset = blockIdx.x * blockDim.y + threadIdx.y;
int cols_this_thread =
ThreadAssigner<VecSize>()(cols, cols_per_thread, &last_tid_idx);
int read_times = cols_per_thread / VecSize;
if (row_offset < rows) {
U buffer[kWarpSize];
U tid_cnt = static_cast<U>(0);
U tid_mean = static_cast<U>(0);
U tid_square = static_cast<U>(0);
const T *__restrict__ row_src = src_data + row_offset * cols;
T *row_dst = dst_data + row_offset * cols;
LayerNormDataReader<T, U, VecSize>()(
row_src, buffer, last_tid_idx, read_times, cols_this_thread);
// Per-lane Welford accumulation over this lane's columns.
for (int i = 0; i < cols_this_thread; i++) {
WelfordOnline<U>(buffer[i], &tid_mean, &tid_square, &tid_cnt);
}
U warp_cnt = tid_cnt;
U warp_mean = tid_mean;
U warp_square = tid_square;
// Combine all 32 lanes; every lane gets the row-wide statistics.
WelfordWarpAllReduce<U>(&warp_mean, &warp_square, &warp_cnt);
// Clamp tiny negative rounding residue in the variance to zero.
U row_variance = max(warp_square / warp_cnt, 0.f);
U row_inv_var = funcs::rsqrt_(row_variance + epsilon);
// TODO(limingshu): make code below vectorization.
if (threadIdx.x == 0) {
// warp_mean is just row_mean here.
mean[row_offset] = warp_mean;
var[row_offset] = row_variance;
}
LayerNormDataWritter<T, U, IsSameType, VecSize>()(row_dst,
buffer,
scale,
bias,
warp_mean,
row_inv_var,
read_times,
cols_this_thread,
last_tid_idx,
valid_scale,
valid_bias);
}
}
// Host-side launcher for LayerNormFwdWithWelford.  Picks a vector width
// from the pointer alignment of x/y (and scale/bias when present), then
// dispatches on {index type, scale type, vec size}:
//   - int32 indices when rows fits in int32, else int64;
//   - scale/bias typed T when they share x's dtype, else U.
// Grid: one warp per row, 4 rows per block (threads = (32, 4, 1));
// `block_size` below is actually the grid dimension, i.e. number of blocks.
template <typename Context, typename T, typename U>
void LaunchLayerNormKernel(const Context &dev_ctx,
const T *x_data,
T *y_data,
const void *void_scale_data,
const void *void_bias_data,
U *mean_data,
U *var_data,
float epsilon,
const int64_t rows,
const int cols,
const bool valid_scale,
const bool valid_bias,
const bool is_same_type) {
constexpr int WarpSize = 32;
constexpr int RowPerBlock = 4;
int64_t block_size = (rows + (RowPerBlock - 1)) / RowPerBlock;
dim3 threads(WarpSize, RowPerBlock, 1);
int vec_size = 1;
int cols_per_thread = (cols + (WarpSize - 1)) / WarpSize;
// Vectorization is only considered when each lane handles more than one
// column and the columns split evenly across the warp.
if (cols_per_thread > 1 && (cols % WarpSize == 0)) {
int data_vec_size = 0;
// OR-ing the addresses: the combined value's alignment is the minimum
// alignment of the participating pointers.
uint64_t addr = (reinterpret_cast<uint64_t>(x_data) |
reinterpret_cast<uint64_t>(y_data));
// NOTE(review): when neither scale nor bias is present, data_vec_size
// stays 0 and vec_size falls back to 1 even if x/y are well aligned —
// looks like a missed vectorization opportunity; confirm intent.
if (valid_bias || valid_scale) {
if (is_same_type) {
addr = valid_scale
? (addr | reinterpret_cast<uint64_t>(void_scale_data))
: addr;
addr = valid_bias ? (addr | reinterpret_cast<uint64_t>(void_bias_data))
: addr;
data_vec_size = phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr));
} else {
uint64_t bias_addr = reinterpret_cast<uint64_t>(void_bias_data);
uint64_t attr_addr = valid_scale
? reinterpret_cast<uint64_t>(void_scale_data)
: bias_addr;
attr_addr = valid_bias
? (valid_scale ? (attr_addr | bias_addr) : attr_addr)
: attr_addr;
data_vec_size = std::min(
phi::GetVectorizedSize<T>(reinterpret_cast<T *>(addr)),
phi::GetVectorizedSize<U>(reinterpret_cast<U *>(attr_addr)));
}
}
// Largest alignment-supported width that also divides cols_per_thread.
for (int size = data_vec_size; size > 0; size /= 2) {
if (cols_per_thread % size == 0) {
vec_size = size;
break;
}
}
}
#define IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, vec_size_) \
case (vec_size_): { \
LayerNormFwdWithWelford<index_t, T, U, is_same_, vec_size_> \
<<<block_size, threads, 0, dev_ctx.stream()>>>( \
x_data, \
y_data, \
static_cast<const scale_t *>(void_scale_data), \
static_cast<const scale_t *>(void_bias_data), \
mean_data, \
var_data, \
static_cast<const U>(epsilon), \
rows, \
cols, \
cols_per_thread, \
valid_scale, \
valid_bias); \
} break
#define IMPL_LAYER_NORM_WELFORD(index_t, scale_t, is_same_) \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 4); \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 2); \
IMPL_LAYER_NORM_WELFORD_CASE(index_t, scale_t, is_same_, 1);
if (rows < std::numeric_limits<int32_t>::max()) {
if (is_same_type) {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, T, true); }
} else {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int32_t, U, false); }
}
} else {
if (is_same_type) {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, T, true); }
} else {
switch (vec_size) { IMPL_LAYER_NORM_WELFORD(int64_t, U, false); }
}
}
#undef IMPL_LAYER_NORM_WELFORD_CASE
#undef IMPL_LAYER_NORM_WELFORD
}
#endif // PADDLE_WITH_CUDA
// Raw-pointer layer-norm forward (no DenseTensor machinery): flattens
// input_shape into (batch, feature) at begin_norm_axis and launches the
// generic LayerNormForward kernel, one block per row with a block size
// chosen by GetDesiredBlockDim.  The default case fires when no
// FIXED_BLOCK_DIM_CASE matches — per the message, feature_size must be > 1.
template <typename T, typename U>
void LayerNormDirectCUDAFunctor<T, U>::operator()(gpuStream_t stream,
const T *input,
std::vector<int> input_shape,
const U *bias,
const U *scale,
T *output,
U *mean,
U *variance,
int begin_norm_axis,
float eps) {
const auto x_dims = phi::make_ddim(input_shape);
auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
switch (phi::funcs::GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
phi::funcs::LayerNormForward<T, U, kBlockDim>
<<<batch_size, kBlockDim, 0, stream>>>(
input, scale, bias, output, mean, variance, eps, feature_size));
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Product from begin_norm_axis to end in layer_norm must be larger "
"than 1"));
break;
}
}
// Explicit instantiations of the direct functor.  The half/float variant
// is compiled only for the CUDA (non-HIP) build.
template class LayerNormDirectCUDAFunctor<float, float>;
template class LayerNormDirectCUDAFunctor<double, double>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
template class LayerNormDirectCUDAFunctor<half, float>;
#endif
// phi LayerNorm forward kernel entry point.  Normalizes over the trailing
// dims starting at begin_norm_axis: y = (x - mean) / sqrt(var + eps)
// [* scale] [+ bias], writing per-row mean and variance.  Dispatch order
// (CUDA build):
//   1) fused "fast" kernel when feature_size is 768..2048 (multiple of
//      256) or 4096 AND both scale and bias exist;
//   2) Welford warp kernel when FLAGS_use_fast_math and feature_size
//      <= 1024;
//   3) generic LayerNormForward otherwise (and always on non-CUDA builds).
// scale/bias must either share x's dtype or be LayerNormParamType<T>.
template <typename T, typename Context>
void LayerNormKernel(const Context &dev_ctx,
const DenseTensor &x,
const paddle::optional<DenseTensor> &scale_opt,
const paddle::optional<DenseTensor> &bias_opt,
float epsilon,
int begin_norm_axis,
DenseTensor *y,
DenseTensor *mean,
DenseTensor *var) {
using U = phi::funcs::LayerNormParamType<T>;
auto *scale = scale_opt.get_ptr();
auto *bias = bias_opt.get_ptr();
const auto x_dims = x.dims();
auto *x_data = x.data<T>();
auto *y_data = dev_ctx.template Alloc<T>(y);
auto *mean_data = dev_ctx.template Alloc<U>(mean);
auto *var_data = dev_ctx.template Alloc<U>(var);
bool valid_scale = (scale != nullptr);
bool valid_bias = (bias != nullptr);
auto *void_scale_data = valid_scale ? scale->data() : nullptr;
auto *void_bias_data = valid_bias ? bias->data() : nullptr;
auto x_dtype = x.dtype();
// Resolve the effective scale/bias dtype (falls back to x's dtype when
// neither is provided) and validate consistency.
phi::DataType scale_bias_dtype;
if (valid_scale) {
scale_bias_dtype = scale->dtype();
if (valid_bias) {
PADDLE_ENFORCE_EQ(
scale->dtype(),
bias->dtype(),
phi::errors::InvalidArgument("This Scale and Bias of layer_norm op "
"should have the same data type."));
}
} else {
scale_bias_dtype = valid_bias ? bias->dtype() : x_dtype;
}
bool is_scale_bias_same_dtype_with_x = x_dtype == scale_bias_dtype;
if (!is_scale_bias_same_dtype_with_x) {
PADDLE_ENFORCE_EQ(scale_bias_dtype,
phi::CppTypeToDataType<U>::Type(),
phi::errors::InvalidArgument(
"Unsupported data type of Scale and Bias"));
}
// Flatten to (batch_size, feature_size) at begin_norm_axis.
auto matrix_dim = phi::flatten_to_2d(x_dims, begin_norm_axis);
int64_t batch_size = static_cast<int64_t>(matrix_dim[0]);
int64_t feature_size = static_cast<int64_t>(matrix_dim[1]);
auto stream = dev_ctx.stream();
#define PADDLE_LAUNCH_LAYERNORM_FWD(ScaleBiasT, IsScaleBiasSameDTypeWithX) \
do { \
switch (phi::funcs::GetDesiredBlockDim(feature_size)) { \
FIXED_BLOCK_DIM_CASE( \
phi::funcs:: \
LayerNormForward<T, U, kBlockDim, IsScaleBiasSameDTypeWithX> \
<<<batch_size, kBlockDim, 0, stream>>>( \
x_data, \
static_cast<const ScaleBiasT *>(void_scale_data), \
static_cast<const ScaleBiasT *>(void_bias_data), \
y_data, \
mean_data, \
var_data, \
epsilon, \
feature_size)); \
default: \
PADDLE_THROW(phi::errors::InvalidArgument( \
"Product from begin_norm_axis to end must be larger than 1")); \
break; \
} \
} while (0)
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, feature_size) \
case (feature_size): { \
constexpr int WARPS_N = feature_size < 1024 ? 1 : (feature_size / 1024); \
constexpr int WARPS_M = 4 / WARPS_N; \
const int THREADS_PER_WARP = 32; \
const int BYTES_PER_LDG = 16; \
const int VecSize = BYTES_PER_LDG / sizeof(T); \
const int THREADS_PER_CTA = WARPS_N * THREADS_PER_WARP * WARPS_M; \
const int ROWS_PER_CTA = WARPS_M; \
const int grid = static_cast<int>( \
std::ceil(batch_size / static_cast<float>(ROWS_PER_CTA))); \
phi::funcs::fast_ln_fwd_kernel<T, \
U, \
ScaleT, \
VecSize, \
WARPS_M, \
WARPS_N, \
BYTES_PER_LDG, \
feature_size> \
<<<grid, THREADS_PER_CTA, 0, stream>>>( \
batch_size, \
feature_size, \
epsilon, \
x_data, \
static_cast<const ScaleT *>(void_scale_data), \
static_cast<const ScaleT *>(void_bias_data), \
mean_data, \
var_data, \
y_data); \
} break
#define PADDLE_LAUNCH_FAST_LAYERNORM_FWD(ScaleT) \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 768); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1024); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1280); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1536); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 1792); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 2048); \
PADDLE_LAUNCH_FAST_LAYERNORM_FWD_BASE(ScaleT, 4096)
#ifdef PADDLE_WITH_CUDA
// Path 1: fused fast kernel, only for the whitelisted feature sizes and
// only when both scale and bias exist.
bool can_call_fast_kernel = false;
if ((feature_size >= 768 && feature_size <= 2048 && feature_size % 256 == 0 ||
feature_size == 4096) &&
scale != nullptr && bias != nullptr) {
can_call_fast_kernel = true;
}
if (can_call_fast_kernel) {
if (is_scale_bias_same_dtype_with_x) {
switch (feature_size) {
PADDLE_LAUNCH_FAST_LAYERNORM_FWD(T);
default:
// NOTE(review): "diviaible" typo and a missing "256" in the message
// below — user-facing error strings; left unchanged here.
PADDLE_THROW(phi::errors::InvalidArgument(
"Only when feature_size is from 256 to 4096 and is diviaible by "
"256 is supported "
"now"));
break;
}
} else {
switch (feature_size) {
PADDLE_LAUNCH_FAST_LAYERNORM_FWD(U);
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"Only when feature_size is from 256 to 4096 and is diviaible by "
"is supported "
"now"));
break;
}
}
} else {
// WarpShuffle intrinsics is involved in LaunchLayerNormKernel.
// Path 2: Welford warp kernel (opt-in via FLAGS_use_fast_math).
if (FLAGS_use_fast_math && feature_size <= 1024 &&
(!std::is_same<T, int8_t>::value)) {
LaunchLayerNormKernel<Context, T, U>(dev_ctx,
x_data,
y_data,
void_scale_data,
void_bias_data,
mean_data,
var_data,
epsilon,
batch_size,
feature_size,
valid_scale,
valid_bias,
is_scale_bias_same_dtype_with_x);
} else {
#endif
// Path 3: generic fallback kernel.
if (is_scale_bias_same_dtype_with_x) {
PADDLE_LAUNCH_LAYERNORM_FWD(T, true);
} else {
PADDLE_LAUNCH_LAYERNORM_FWD(U, false);
}
#ifdef PADDLE_WITH_CUDA
}
}
#endif
#undef PADDLE_LAUNCH_LAYERNORM_FWD
#undef PADDLE_LAUNCH_FAST_LAYERNORM_FWD
}
} // namespace phi
// Kernel registration, branched on backend capability.
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double.
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
phi::dtype::float16) {
// Outputs 1/2 are mean/variance; dtype left UNDEFINED so it is resolved
// at runtime (presumably LayerNormParamType<T> — confirm).
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#elif CUDNN_VERSION_MIN(8, 1, 0)
// cuDNN >= 8.1: bfloat16 is additionally registered.
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#else
// Older CUDA toolchains: no bfloat16.
PD_REGISTER_KERNEL(layer_norm,
GPU,
ALL_LAYOUT,
phi::LayerNormKernel,
float,
double,
phi::dtype::float16) {
kernel->OutputAt(1).SetDataType(phi::DataType::UNDEFINED);
kernel->OutputAt(2).SetDataType(phi::DataType::UNDEFINED);
}
#endif
|
d946489fcb281a188fe3d38fad30bf37b271ceaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
// Forward ReLU kernel: out = in for in > 0, out = in * negative_slope
// otherwise (leaky-ReLU when negative_slope != 0).  Iterates over the n
// elements via Caffe's CUDA_KERNEL_LOOP macro.
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype v = in[index];
    out[index] = (v > 0) ? v : v * negative_slope;
  }
}
// GPU forward pass: top[0] = ReLU(bottom[0]) using the layer's
// negative_slope parameter.  Launches ReLUForward over all `count`
// elements with Caffe's standard grid/block helpers, then checks for
// launch/execution errors via CUDA_POST_KERNEL_CHECK.
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
// Backward ReLU kernel: out_diff = in_diff * dReLU/dx, where the local
// derivative is 1 for in_data > 0 and negative_slope for in_data <= 0.
// The derivative is assembled arithmetically from the two comparison
// results (branch-free) rather than with a conditional.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
// GPU backward pass: bottom gradient = top gradient gated by the forward
// input's sign (see ReLUBackward).  Skipped entirely when the bottom blob
// does not require gradients.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
// "Deconv" pass-through kernel: propagate only positive gradients,
// i.e. out_diff = max(in_diff, 0) element-wise.  Note this gates on the
// gradient itself, not on the forward input.
template <typename Dtype>
__global__ void ReLUDeconv(const int n, const Dtype* in_diff,
    Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype g = in_diff[index];
    out_diff[index] = (g > 0) ? g : Dtype(0);
  }
}
// GPU "deconv" (visualization-style) pass: keeps only positive top
// gradients (see ReLUDeconv).  negative_slope is ignored on this path,
// so a warning is logged when it is nonzero.
template <typename Dtype>
void ReLULayer<Dtype>::Deconv_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (negative_slope != Dtype(0))
LOG(WARNING) << "negative_slope parameter = " << negative_slope << " but nonzero negative_slope params are not supported for Deconv through RELU.";
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUDeconv<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
//INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
INSTANTIATE_LAYER_GPU_FUNCS_WITH_DECONV(ReLULayer);
} // namespace caffe
| d946489fcb281a188fe3d38fad30bf37b271ceaf.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
// Forward ReLU kernel: out = in for in > 0, out = in * negative_slope
// otherwise (leaky-ReLU when negative_slope != 0).  Iterates over the n
// elements via Caffe's CUDA_KERNEL_LOOP macro.
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype v = in[index];
    out[index] = (v > 0) ? v : v * negative_slope;
  }
}
// GPU forward pass: top[0] = ReLU(bottom[0]) using the layer's
// negative_slope parameter.  Launches ReLUForward over all `count`
// elements with Caffe's standard grid/block helpers, then checks for
// launch/execution errors via CUDA_POST_KERNEL_CHECK.
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
// Backward ReLU kernel: out_diff = in_diff * dReLU/dx, where the local
// derivative is 1 for in_data > 0 and negative_slope for in_data <= 0.
// The derivative is assembled arithmetically from the two comparison
// results (branch-free) rather than with a conditional.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
// GPU backward pass: bottom gradient = top gradient gated by the forward
// input's sign (see ReLUBackward).  Skipped entirely when the bottom blob
// does not require gradients.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
// "Deconv" pass-through kernel: propagate only positive gradients,
// i.e. out_diff = max(in_diff, 0) element-wise.  Note this gates on the
// gradient itself, not on the forward input.
template <typename Dtype>
__global__ void ReLUDeconv(const int n, const Dtype* in_diff,
    Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype g = in_diff[index];
    out_diff[index] = (g > 0) ? g : Dtype(0);
  }
}
// GPU "deconv" (visualization-style) pass: keeps only positive top
// gradients (see ReLUDeconv).  negative_slope is ignored on this path,
// so a warning is logged when it is nonzero.
template <typename Dtype>
void ReLULayer<Dtype>::Deconv_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (negative_slope != Dtype(0))
LOG(WARNING) << "negative_slope parameter = " << negative_slope << " but nonzero negative_slope params are not supported for Deconv through RELU.";
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUDeconv<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
//INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
INSTANTIATE_LAYER_GPU_FUNCS_WITH_DECONV(ReLULayer);
} // namespace caffe
|
bb0548e36dd914ef90b3617f1bfec46a2105797e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <malloc.h>
// Scalar addition kernel (meant to run with a single thread):
// stores *x + *y into *z and prints the sum from the device.
__global__ void add(int *x, int *y, int *z)
{
    const int sum = *x + *y;
    *z = sum;
    printf("z is %d\n", sum);
}
// Reads two ints from stdin, adds them on the GPU, and prints the result.
// Fixes over the original: the host allocations (a, b, c) are now freed
// (they leaked), malloc and scanf results are checked.
int main()
{
    // Host and device pointer declarations.
    int *a, *b, *c;
    int *deva, *devb, *devc;
    // Dynamic memory allocation in host (checked: malloc may fail).
    a = (int *)malloc(sizeof(int));
    b = (int *)malloc(sizeof(int));
    c = (int *)malloc(sizeof(int));
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        free(a), free(b), free(c);
        return 1;
    }
    // Reserving memory in device.
    hipMalloc((int **)&deva, sizeof(int));
    hipMalloc((int **)&devb, sizeof(int));
    hipMalloc((int **)&devc, sizeof(int));
    // Inputting values from user (checked: input may be malformed).
    printf("Enter value of a and b\n");
    if (scanf("%d %d", a, b) != 2) {
        fprintf(stderr, "Invalid input: expected two integers\n");
        hipFree(deva), hipFree(devb), hipFree(devc);
        free(a), free(b), free(c);
        return 1;
    }
    // Copying values from HostToDevice.
    hipMemcpy(deva, a, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(devb, b, sizeof(int), hipMemcpyHostToDevice);
    // Calling kernel: one block, one thread (the kernel is scalar).
    hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, deva, devb, devc);
    // Copying values from DeviceToHost (blocking copy also syncs the kernel).
    hipMemcpy(c, devc, sizeof(int), hipMemcpyDeviceToHost);
    printf("Result is: %d\n", *c);
    // Free up the device memory...
    hipFree(deva), hipFree(devb), hipFree(devc);
    // ...and the host memory (the original leaked a, b and c).
    free(a), free(b), free(c);
    return 0;
}
| bb0548e36dd914ef90b3617f1bfec46a2105797e.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <malloc.h>
// Scalar addition kernel (meant to run with a single thread):
// stores *x + *y into *z and prints the sum from the device.
__global__ void add(int *x, int *y, int *z)
{
    const int sum = *x + *y;
    *z = sum;
    printf("z is %d\n", sum);
}
// Reads two ints from stdin, adds them on the GPU, and prints the result.
// Fixes over the original: the host allocations (a, b, c) are now freed
// (they leaked), malloc and scanf results are checked.
int main()
{
    // Host and device pointer declarations.
    int *a, *b, *c;
    int *deva, *devb, *devc;
    // Dynamic memory allocation in host (checked: malloc may fail).
    a = (int *)malloc(sizeof(int));
    b = (int *)malloc(sizeof(int));
    c = (int *)malloc(sizeof(int));
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Host allocation failed\n");
        free(a), free(b), free(c);
        return 1;
    }
    // Reserving memory in device.
    cudaMalloc((int **)&deva, sizeof(int));
    cudaMalloc((int **)&devb, sizeof(int));
    cudaMalloc((int **)&devc, sizeof(int));
    // Inputting values from user (checked: input may be malformed).
    printf("Enter value of a and b\n");
    if (scanf("%d %d", a, b) != 2) {
        fprintf(stderr, "Invalid input: expected two integers\n");
        cudaFree(deva), cudaFree(devb), cudaFree(devc);
        free(a), free(b), free(c);
        return 1;
    }
    // Copying values from HostToDevice.
    cudaMemcpy(deva, a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(devb, b, sizeof(int), cudaMemcpyHostToDevice);
    // Calling kernel: one block, one thread (the kernel is scalar).
    add<<<1,1>>>(deva, devb, devc);
    // Copying values from DeviceToHost (blocking copy also syncs the kernel).
    cudaMemcpy(c, devc, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Result is: %d\n", *c);
    // Free up the device memory...
    cudaFree(deva), cudaFree(devb), cudaFree(devc);
    // ...and the host memory (the original leaked a, b and c).
    free(a), free(b), free(c);
    return 0;
}
|
3a5dc9a7f21e0e2d775827bc5b69a3c66055ebab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/pad.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/variable.hpp>
namespace nbla {
using cuda::Index_t;
// Per-axis padding descriptor (precomputed by the host-side setup):
//   x_stride - element stride of this axis in the unpadded input x
//   y_stride - element stride of this axis in the padded output y
//   y_shape  - extent of this axis in the output
//   pad      - number of padded elements before (first) / after (second)
struct AxisParam {
Index_t x_stride;
Index_t y_stride;
Index_t y_shape;
struct {
Index_t first;
Index_t second;
} pad;
};
// For a single output element y_idx: decompose the flat index axis by
// axis using the output strides; if the element lies inside the
// non-padded region on every axis, store the corresponding flat input
// index into idx_map[y_idx].  Elements inside a padded region return
// early and leave idx_map[y_idx] untouched.  DIMENSIONS > 0 fixes the
// loop trip count at compile time (enables full unrolling); 0 means use
// the runtime ndim.
template <int DIMENSIONS>
__inline__ __device__ void d_init_index_map(const Index_t y_idx,
Index_t *idx_map, const int ndim,
const AxisParam *params) {
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
Index_t y_tmp = y_idx;
Index_t x_idx = 0;
#pragma unroll
for (int axis = 0; axis < NDIM; axis++) {
const auto &param = params[axis];
const auto axis_idx = y_tmp / param.y_stride;
y_tmp -= axis_idx * param.y_stride;
// Padded region on this axis: no source element exists.
if ((axis_idx < param.pad.first) ||
(axis_idx >= param.y_shape - param.pad.second)) {
return;
}
x_idx += (axis_idx - param.pad.first) * param.x_stride;
}
idx_map[y_idx] = x_idx;
}
// Kernel: build idx_map (output index -> input index) for the non-padded
// elements.  The first NDIM * sizeof(AxisParam) / sizeof(int) threads
// stage the AxisParam array into dynamic shared memory word by word
// before the element loop.
// NOTE(review): assumes blockDim.x >= that word count — confirm the
// launch configuration guarantees it.
template <int DIMENSIONS = 0>
__global__ void init_index_map(const Index_t size, Index_t *idx_map,
const int ndim, const AxisParam *params) {
extern __shared__ AxisParam shared[];
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
// All threads must see the staged params before using them.
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_init_index_map<DIMENSIONS>(i, idx_map, ndim, shared);
}
}
namespace pad_constant_impl {
// Constant-mode pad, one output element: same index decomposition as
// d_init_index_map, but padded elements receive the constant `val` and
// interior elements copy from x[x_idx].
template <typename T, int DIMENSIONS>
__inline__ __device__ void d_pad_forward(const Index_t y_idx, const T *x, T *y,
const int ndim,
const AxisParam *params, const T val) {
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
Index_t y_tmp = y_idx;
Index_t x_idx = 0;
#pragma unroll
for (int axis = 0; axis < NDIM; axis++) {
const auto &param = params[axis];
const auto axis_idx = y_tmp / param.y_stride;
y_tmp -= axis_idx * param.y_stride;
// Padded region on this axis: emit the fill constant.
if ((axis_idx < param.pad.first) ||
(axis_idx >= param.y_shape - param.pad.second)) {
y[y_idx] = val;
return;
}
x_idx += (axis_idx - param.pad.first) * param.x_stride;
}
y[y_idx] = x[x_idx];
}
// Kernel: constant-mode pad forward over all `size` output elements.
// Stages the AxisParam array into dynamic shared memory (same scheme and
// caveat as init_index_map) and then applies d_pad_forward per element.
template <typename T, int DIMENSIONS = 0>
__global__ void pad_forward(const Index_t size, const T *x, T *y,
const int ndim, const AxisParam *params,
const T constant_value) {
extern __shared__ AxisParam shared[];
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_pad_forward<T, DIMENSIONS>(i, x, y, ndim, shared, constant_value);
}
}
// Constant-mode pad backward, one output-gradient element: padded
// positions contribute nothing (early return); interior positions route
// dy[y_idx] back to dx[x_idx], either overwriting or accumulating
// depending on the ACCUMULATE template flag.
template <typename T, bool ACCUMULATE, int DIMENSIONS>
__inline__ __device__ void d_pad_backward(const Index_t y_idx, const T *dy,
T *dx, const int ndim,
const AxisParam *params) {
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
Index_t y_tmp = y_idx;
Index_t x_idx = 0;
#pragma unroll
for (int axis = 0; axis < NDIM; axis++) {
const auto &param = params[axis];
const auto axis_idx = y_tmp / param.y_stride;
y_tmp -= axis_idx * param.y_stride;
if ((axis_idx < param.pad.first) ||
(axis_idx >= param.y_shape - param.pad.second)) {
return;
}
x_idx += (axis_idx - param.pad.first) * param.x_stride;
}
// Each interior x element corresponds to exactly one y element here, so
// a plain (non-atomic) store/add is sufficient.
dx[x_idx] = ACCUMULATE ? dx[x_idx] + dy[y_idx] : dy[y_idx];
}
// Kernel: constant-mode pad backward over all `size` output-gradient
// elements.  Stages AxisParam into dynamic shared memory (same scheme and
// caveat as init_index_map) and applies d_pad_backward per element.
template <typename T, int DIMENSIONS = 0, bool ACCUMULATE = false>
__global__ void pad_backward(const Index_t size, const T *dy, T *dx,
const int ndim, const AxisParam *params) {
extern __shared__ AxisParam shared[];
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_pad_backward<T, ACCUMULATE, DIMENSIONS>(i, dy, dx, ndim, shared);
}
}
} // namespace pad_constant_impl
namespace pad_reflect_impl {
// Fold an out-of-range distance back into [0, len] by reflection
// ("triangle wave"): even traversals count up from 0, odd traversals count
// down from len. A non-positive len degenerates to 0.
__inline__ __device__ Index_t reflect_index(Index_t idx, Index_t len) {
if (len <= 0) {
return 0;
}
const Index_t traversal = (idx / len) & 1;
const Index_t remainder = idx % len;
return std::abs(traversal * len - remainder);
}
// Reflect one axis of the index map. Runs once per idx_map element (idx_map
// has the output's size). Interior elements along `axis` are left untouched;
// elements inside the leading or trailing pad copy the already-resolved
// index of their mirror element -- which is why callers must process the
// axes one kernel launch at a time (innermost to outermost).
__inline__ __device__ void d_pad_index_map(const Index_t dst_idx,
Index_t *idx_map, const int ndim,
const int axis,
const AxisParam *params) {
// This function runs for each idx_map element and copies the index
// of a reflected output element to the corresponding location in
// idx_map. The idx_map has the same size as the output array.
Index_t dst_tmp = dst_idx;
Index_t src_idx = 0;
Index_t axis_idx = 0;
// Axes before `axis` keep their coordinates unchanged in the source index.
for (int ax = 0; ax < axis; ax++) {
axis_idx = dst_tmp / params[ax].y_stride;
dst_tmp = dst_tmp - axis_idx * params[ax].y_stride;
src_idx += axis_idx * params[ax].y_stride;
}
// Coordinate of this element along the axis being padded.
axis_idx = dst_tmp / params[axis].y_stride;
dst_tmp = dst_tmp - axis_idx * params[axis].y_stride;
const auto pad_sum = params[axis].pad.first + params[axis].pad.second;
const auto src_len = params[axis].y_shape - pad_sum;
if (axis_idx < params[axis].pad.first) {
// Leading pad: mirror across the first interior element.
const auto p = params[axis].pad.first;
const auto r = reflect_index(p - axis_idx, src_len - 1);
src_idx += (p + r) * params[axis].y_stride;
for (int ax = axis + 1; ax < ndim; ax++) {
axis_idx = dst_tmp / params[ax].y_stride;
dst_tmp = dst_tmp - axis_idx * params[ax].y_stride;
src_idx += axis_idx * params[ax].y_stride;
}
idx_map[dst_idx] = idx_map[src_idx];
return;
}
if (axis_idx >= params[axis].y_shape - params[axis].pad.second) {
// Trailing pad: mirror across the last interior element.
const auto p = params[axis].pad.first + src_len;
const auto r = reflect_index(axis_idx - p + 1, src_len - 1);
src_idx += (p - r - 1) * params[axis].y_stride;
for (int ax = axis + 1; ax < ndim; ax++) {
axis_idx = dst_tmp / params[ax].y_stride;
dst_tmp = dst_tmp - axis_idx * params[ax].y_stride;
src_idx += axis_idx * params[ax].y_stride;
}
idx_map[dst_idx] = idx_map[src_idx];
return;
}
}
// Kernel wrapper for d_pad_index_map. Stages the AxisParam table into
// dynamic shared memory (one int per thread) before use.
// NOTE(review): assumes blockDim.x >= ndim * sizeof(AxisParam) / sizeof(int);
// confirm this holds for large ndim with the 128-thread launches.
__global__ void pad_index_map(const Index_t size, Index_t *idx, const int ndim,
const int axis, const AxisParam *params) {
extern __shared__ AxisParam shared[];
if (threadIdx.x < ndim * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_pad_index_map(i, idx, ndim, axis, shared);
}
}
// Reflect-mode forward: plain gather through the precomputed index map,
// y[i] = x[idx_map[i]] for every output element.
template <typename T>
__global__ void pad_forward(const Index_t size, const T *x, T *y,
const Index_t *idx_map) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
const Index_t src = idx_map[i];
y[i] = x[src];
}
}
// Reflect-mode backward: scatter-add through the index map. Several output
// elements can map to the same input element, so gradients are combined
// with atomic adds (callers zero dx beforehand when not accumulating).
template <typename T>
__global__ void pad_backward(const Index_t size, const T *dy, T *dx,
const Index_t *idx_map) {
NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(&dx[idx_map[i]], dy[i]); }
}
} // namespace pad_reflect_impl
// Builds the per-axis AxisParam table from the base-class setup results
// (strides, output shape, pad widths) and uploads it once to device memory.
// The device copy is owned by parameter_memory_ and reused by every
// forward/backward call.
template <typename T>
void PadCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) {
Pad<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
Variable &x_var = *inputs[0];
Variable &y_var = *outputs[0];
// Host-side staging buffer, one entry per padded axis.
std::vector<AxisParam> h_params;
h_params.reserve(this->padding_.size());
for (int axis = 0; axis < this->padding_.size(); axis++) {
AxisParam axis_param;
axis_param.x_stride = this->x_stride_.at(axis);
axis_param.y_stride = this->y_stride_.at(axis);
axis_param.y_shape = this->y_shape_.at(axis);
axis_param.pad.first = this->padding_.at(axis).first;
axis_param.pad.second = this->padding_.at(axis).second;
h_params.push_back(axis_param);
}
auto bytes = h_params.size() * sizeof(AxisParam);
// Device-side copy; ownership is transferred to parameter_memory_ below.
auto array = new CudaCachedArray(bytes, get_dtype<char>(), this->ctx_);
auto d_params = array->pointer<AxisParam>();
NBLA_CUDA_CHECK(
hipMemcpy(d_params, h_params.data(), bytes, hipMemcpyHostToDevice));
this->parameter_memory_ = std::unique_ptr<CudaCachedArray>(std::move(array));
}
// Forward pass. PAD_CONSTANT launches a single pad_forward kernel; the ndim
// switch selects a template instantiation with a compile-time dimension
// count (unrolled) for ndim 1..4 and the runtime fallback otherwise.
// PAD_REFLECT first builds an index map (init_index_map, then one
// pad_index_map launch per axis, outermost-last, because padded entries
// read index values produced by earlier launches) and then gathers with it.
template <typename T>
void PadCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
Variable &x_var = *inputs[0];
Variable &y_var = *outputs[0];
const auto y_size = y_var.size();
const auto ndim = this->padding_.size();
auto x = x_var.get_data_pointer<Tcu>(this->ctx_);
auto y = y_var.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto threads = 128;
auto blocks = cuda_get_blocks_by_size(y_var.size());
// Dynamic shared memory holds the AxisParam table inside the kernels.
auto shared = this->parameter_memory_->size();
auto params = this->parameter_memory_->template pointer<AxisParam>();
if (this->pad_mode_ == this->PAD_CONSTANT) {
using pad_constant_impl::pad_forward;
auto cvalue = this->constant_value_;
void (*kernel)(const Index_t, const Tcu *, Tcu *, const int,
const AxisParam *, const Tcu);
if (ndim == 1) {
kernel = pad_forward<Tcu, 1>;
} else if (ndim == 2) {
kernel = pad_forward<Tcu, 2>;
} else if (ndim == 3) {
kernel = pad_forward<Tcu, 3>;
} else if (ndim == 4) {
kernel = pad_forward<Tcu, 4>;
} else {
kernel = pad_forward<Tcu>;
}
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), shared, 0, y_size, x, y, ndim, params, cvalue);
NBLA_CUDA_KERNEL_CHECK();
}
else if (this->pad_mode_ == this->PAD_REFLECT) {
using namespace pad_reflect_impl;
Variable &idx_map = this->index_map_;
auto idx = idx_map.cast_data_and_get_pointer<Index_t>(this->ctx_, false);
void (*kernel)(const Index_t, Index_t *, const int, const AxisParam *);
if (ndim == 1) {
kernel = init_index_map<1>;
} else if (ndim == 2) {
kernel = init_index_map<2>;
} else if (ndim == 3) {
kernel = init_index_map<3>;
} else if (ndim == 4) {
kernel = init_index_map<4>;
} else {
kernel = init_index_map<>;
}
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), shared, 0, y_size, idx, ndim, params);
NBLA_CUDA_KERNEL_CHECK();
// Padding the index map must be done with individual kernel
// launches to synchronize index values which become source of
// padding for the next outer axis.
for (int axis = ndim - 1; axis >= 0; axis--) {
auto kernel = pad_index_map;
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), shared, 0, y_size, idx, ndim, axis, params);
NBLA_CUDA_KERNEL_CHECK();
}
// Perform y[i] = x[idx[i]] for all i in y_size
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(pad_forward, y_size, x, y, idx);
}
}
// Backward pass. PAD_CONSTANT picks an accumulate/overwrite template
// instantiation (again specialized for ndim 1..4). PAD_REFLECT zeroes the
// input gradient when not accumulating and then scatter-adds through the
// index map computed during forward_impl.
template <typename T>
void PadCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum_gradient) {
if (propagate_down[0]) {
cuda_set_device(this->device_);
auto accum = accum_gradient[0];
Variable &x_var = *inputs[0];
Variable &y_var = *outputs[0];
const auto ndim = this->padding_.size();
auto dy = y_var.get_grad_pointer<Tcu>(this->ctx_);
if (this->pad_mode_ == this->PAD_CONSTANT) {
using namespace pad_constant_impl;
auto dx = x_var.cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum);
auto threads = 128;
auto blocks = cuda_get_blocks_by_size(y_var.size());
auto shared = this->parameter_memory_->size();
auto params = this->parameter_memory_->template pointer<AxisParam>();
void (*kernel)(const Index_t, const Tcu *, Tcu *, const int,
const AxisParam *);
if (ndim == 1) {
kernel = accum ? pad_backward<Tcu, 1, true> : pad_backward<Tcu, 1>;
} else if (ndim == 2) {
kernel = accum ? pad_backward<Tcu, 2, true> : pad_backward<Tcu, 2>;
} else if (ndim == 3) {
kernel = accum ? pad_backward<Tcu, 3, true> : pad_backward<Tcu, 3>;
} else if (ndim == 4) {
kernel = accum ? pad_backward<Tcu, 4, true> : pad_backward<Tcu, 4>;
} else {
kernel = accum ? pad_backward<Tcu, 0, true> : pad_backward<Tcu>;
}
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), shared, 0, y_var.size(), dy, dx, ndim, params);
NBLA_CUDA_KERNEL_CHECK();
}
else if (this->pad_mode_ == this->PAD_REFLECT) {
// Scatter-add requires a zeroed destination unless accumulating.
if (!accum) {
x_var.grad()->zero();
}
Variable &idx_map = this->index_map_;
auto idx = idx_map.get_data_pointer<Index_t>(this->ctx_);
auto dx = x_var.cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
auto backward = pad_reflect_impl::pad_backward<Tcu>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward, y_var.size(), dy, dx, idx);
}
}
}
} // namespace nbla
| 3a5dc9a7f21e0e2d775827bc5b69a3c66055ebab.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/pad.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/variable.hpp>
namespace nbla {
using cuda::Index_t;
// Per-axis padding metadata, built on the host in setup_impl and copied to
// device memory for use inside the kernels.
struct AxisParam {
Index_t x_stride; // element stride of this axis in the input
Index_t y_stride; // element stride of this axis in the (padded) output
Index_t y_shape;  // output extent along this axis
struct {
Index_t first;  // leading pad width
Index_t second; // trailing pad width
} pad;
};
// Initialize the reflect-mode index map for one output element: decompose
// the flat output index into per-axis coordinates, and for interior
// (non-padded) elements store the flat index of the matching input element.
// Padded elements are left untouched here; d_pad_index_map resolves them
// later, one axis per launch.
// DIMENSIONS > 0 fixes the loop trip count at compile time so the
// #pragma unroll can take effect; DIMENSIONS == 0 uses the runtime ndim.
template <int DIMENSIONS>
__inline__ __device__ void d_init_index_map(const Index_t y_idx,
Index_t *idx_map, const int ndim,
const AxisParam *params) {
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
Index_t y_tmp = y_idx;
Index_t x_idx = 0;
#pragma unroll
for (int axis = 0; axis < NDIM; axis++) {
// Fixed mojibake: "¶m" was a mis-encoded "&param" (HTML entity for
// "&para"); the garbled form does not compile.
const auto &param = params[axis];
const auto axis_idx = y_tmp / param.y_stride;
y_tmp -= axis_idx * param.y_stride;
if ((axis_idx < param.pad.first) ||
(axis_idx >= param.y_shape - param.pad.second)) {
return;
}
x_idx += (axis_idx - param.pad.first) * param.x_stride;
}
idx_map[y_idx] = x_idx;
}
// Kernel wrapper for d_init_index_map. Stages the AxisParam table into
// dynamic shared memory (one int per thread) before use.
// NOTE(review): assumes blockDim.x >= NDIM * sizeof(AxisParam) / sizeof(int);
// holds for the 128-thread launches in this file -- confirm for other uses.
template <int DIMENSIONS = 0>
__global__ void init_index_map(const Index_t size, Index_t *idx_map,
const int ndim, const AxisParam *params) {
extern __shared__ AxisParam shared[];
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_init_index_map<DIMENSIONS>(i, idx_map, ndim, shared);
}
}
namespace pad_constant_impl {
// Forward of constant-mode padding for one output element. Decomposes the
// flat output index into per-axis coordinates; elements inside a padded
// border receive `val`, interior elements copy the matching input element.
// DIMENSIONS > 0 fixes the loop trip count at compile time so the
// #pragma unroll can take effect; DIMENSIONS == 0 uses the runtime ndim.
template <typename T, int DIMENSIONS>
__inline__ __device__ void d_pad_forward(const Index_t y_idx, const T *x, T *y,
const int ndim,
const AxisParam *params, const T val) {
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
Index_t y_tmp = y_idx;
Index_t x_idx = 0;
#pragma unroll
for (int axis = 0; axis < NDIM; axis++) {
// Fixed mojibake: "¶m" was a mis-encoded "&param" (HTML entity for
// "&para"); the garbled form does not compile.
const auto &param = params[axis];
const auto axis_idx = y_tmp / param.y_stride;
y_tmp -= axis_idx * param.y_stride;
if ((axis_idx < param.pad.first) ||
(axis_idx >= param.y_shape - param.pad.second)) {
y[y_idx] = val;
return;
}
x_idx += (axis_idx - param.pad.first) * param.x_stride;
}
y[y_idx] = x[x_idx];
}
// Constant-mode padding forward kernel: one thread iteration per output
// element, delegating to d_pad_forward after staging the AxisParam table
// into dynamic shared memory (one int per thread).
// NOTE(review): assumes blockDim.x >= NDIM * sizeof(AxisParam) / sizeof(int);
// holds for the 128-thread launches in this file -- confirm for other uses.
template <typename T, int DIMENSIONS = 0>
__global__ void pad_forward(const Index_t size, const T *x, T *y,
const int ndim, const AxisParam *params,
const T constant_value) {
extern __shared__ AxisParam shared[];
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_pad_forward<T, DIMENSIONS>(i, x, y, ndim, shared, constant_value);
}
}
// Backward of constant-mode padding for one output element. Gradients of
// padded elements are dropped (early return); interior elements write (or,
// when ACCUMULATE, add) dy[y_idx] into the matching dx[x_idx].
// DIMENSIONS > 0 fixes the loop trip count at compile time so the
// #pragma unroll can take effect; DIMENSIONS == 0 uses the runtime ndim.
template <typename T, bool ACCUMULATE, int DIMENSIONS>
__inline__ __device__ void d_pad_backward(const Index_t y_idx, const T *dy,
T *dx, const int ndim,
const AxisParam *params) {
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
Index_t y_tmp = y_idx;
Index_t x_idx = 0;
#pragma unroll
for (int axis = 0; axis < NDIM; axis++) {
// Fixed mojibake: "¶m" was a mis-encoded "&param" (HTML entity for
// "&para"); the garbled form does not compile.
const auto &param = params[axis];
const auto axis_idx = y_tmp / param.y_stride;
y_tmp -= axis_idx * param.y_stride;
if ((axis_idx < param.pad.first) ||
(axis_idx >= param.y_shape - param.pad.second)) {
return;
}
x_idx += (axis_idx - param.pad.first) * param.x_stride;
}
dx[x_idx] = ACCUMULATE ? dx[x_idx] + dy[y_idx] : dy[y_idx];
}
// Constant-mode padding backward kernel: one thread iteration per output
// element, delegating to d_pad_backward after staging the AxisParam table
// into dynamic shared memory (one int per thread).
// NOTE(review): assumes blockDim.x >= NDIM * sizeof(AxisParam) / sizeof(int);
// holds for the 128-thread launches in this file -- confirm for other uses.
template <typename T, int DIMENSIONS = 0, bool ACCUMULATE = false>
__global__ void pad_backward(const Index_t size, const T *dy, T *dx,
const int ndim, const AxisParam *params) {
extern __shared__ AxisParam shared[];
const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim;
if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_pad_backward<T, ACCUMULATE, DIMENSIONS>(i, dy, dx, ndim, shared);
}
}
} // namespace pad_constant_impl
namespace pad_reflect_impl {
// Fold an out-of-range distance back into [0, len] by reflection
// ("triangle wave"): even traversals count up from 0, odd traversals count
// down from len. A non-positive len degenerates to 0.
__inline__ __device__ Index_t reflect_index(Index_t idx, Index_t len) {
if (len <= 0) {
return 0;
}
const Index_t traversal = (idx / len) & 1;
const Index_t remainder = idx % len;
return std::abs(traversal * len - remainder);
}
// Reflect one axis of the index map. Runs once per idx_map element (idx_map
// has the output's size). Interior elements along `axis` are left untouched;
// elements inside the leading or trailing pad copy the already-resolved
// index of their mirror element -- which is why callers must process the
// axes one kernel launch at a time (innermost to outermost).
__inline__ __device__ void d_pad_index_map(const Index_t dst_idx,
Index_t *idx_map, const int ndim,
const int axis,
const AxisParam *params) {
// This function runs for each idx_map element and copies the index
// of a reflected output element to the corresponding location in
// idx_map. The idx_map has the same size as the output array.
Index_t dst_tmp = dst_idx;
Index_t src_idx = 0;
Index_t axis_idx = 0;
// Axes before `axis` keep their coordinates unchanged in the source index.
for (int ax = 0; ax < axis; ax++) {
axis_idx = dst_tmp / params[ax].y_stride;
dst_tmp = dst_tmp - axis_idx * params[ax].y_stride;
src_idx += axis_idx * params[ax].y_stride;
}
// Coordinate of this element along the axis being padded.
axis_idx = dst_tmp / params[axis].y_stride;
dst_tmp = dst_tmp - axis_idx * params[axis].y_stride;
const auto pad_sum = params[axis].pad.first + params[axis].pad.second;
const auto src_len = params[axis].y_shape - pad_sum;
if (axis_idx < params[axis].pad.first) {
// Leading pad: mirror across the first interior element.
const auto p = params[axis].pad.first;
const auto r = reflect_index(p - axis_idx, src_len - 1);
src_idx += (p + r) * params[axis].y_stride;
for (int ax = axis + 1; ax < ndim; ax++) {
axis_idx = dst_tmp / params[ax].y_stride;
dst_tmp = dst_tmp - axis_idx * params[ax].y_stride;
src_idx += axis_idx * params[ax].y_stride;
}
idx_map[dst_idx] = idx_map[src_idx];
return;
}
if (axis_idx >= params[axis].y_shape - params[axis].pad.second) {
// Trailing pad: mirror across the last interior element.
const auto p = params[axis].pad.first + src_len;
const auto r = reflect_index(axis_idx - p + 1, src_len - 1);
src_idx += (p - r - 1) * params[axis].y_stride;
for (int ax = axis + 1; ax < ndim; ax++) {
axis_idx = dst_tmp / params[ax].y_stride;
dst_tmp = dst_tmp - axis_idx * params[ax].y_stride;
src_idx += axis_idx * params[ax].y_stride;
}
idx_map[dst_idx] = idx_map[src_idx];
return;
}
}
// Kernel wrapper for d_pad_index_map. Stages the AxisParam table into
// dynamic shared memory (one int per thread) before use.
// NOTE(review): assumes blockDim.x >= ndim * sizeof(AxisParam) / sizeof(int);
// confirm this holds for large ndim with the 128-thread launches.
__global__ void pad_index_map(const Index_t size, Index_t *idx, const int ndim,
const int axis, const AxisParam *params) {
extern __shared__ AxisParam shared[];
if (threadIdx.x < ndim * sizeof(AxisParam) / sizeof(int)) {
auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x];
reinterpret_cast<int *>(shared)[threadIdx.x] = tmp;
}
__syncthreads();
NBLA_CUDA_KERNEL_LOOP(i, size) {
d_pad_index_map(i, idx, ndim, axis, shared);
}
}
// Reflect-mode forward: plain gather through the precomputed index map,
// y[i] = x[idx_map[i]] for every output element.
template <typename T>
__global__ void pad_forward(const Index_t size, const T *x, T *y,
const Index_t *idx_map) {
NBLA_CUDA_KERNEL_LOOP(i, size) {
const Index_t src = idx_map[i];
y[i] = x[src];
}
}
// Reflect-mode backward: scatter-add through the index map. Several output
// elements can map to the same input element, so gradients are combined
// with atomic adds (callers zero dx beforehand when not accumulating).
template <typename T>
__global__ void pad_backward(const Index_t size, const T *dy, T *dx,
const Index_t *idx_map) {
NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(&dx[idx_map[i]], dy[i]); }
}
} // namespace pad_reflect_impl
// Builds the per-axis AxisParam table from the base-class setup results
// (strides, output shape, pad widths) and uploads it once to device memory.
// The device copy is owned by parameter_memory_ and reused by every
// forward/backward call.
template <typename T>
void PadCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) {
Pad<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
Variable &x_var = *inputs[0];
Variable &y_var = *outputs[0];
// Host-side staging buffer, one entry per padded axis.
std::vector<AxisParam> h_params;
h_params.reserve(this->padding_.size());
for (int axis = 0; axis < this->padding_.size(); axis++) {
AxisParam axis_param;
axis_param.x_stride = this->x_stride_.at(axis);
axis_param.y_stride = this->y_stride_.at(axis);
axis_param.y_shape = this->y_shape_.at(axis);
axis_param.pad.first = this->padding_.at(axis).first;
axis_param.pad.second = this->padding_.at(axis).second;
h_params.push_back(axis_param);
}
auto bytes = h_params.size() * sizeof(AxisParam);
// Device-side copy; ownership is transferred to parameter_memory_ below.
auto array = new CudaCachedArray(bytes, get_dtype<char>(), this->ctx_);
auto d_params = array->pointer<AxisParam>();
NBLA_CUDA_CHECK(
cudaMemcpy(d_params, h_params.data(), bytes, cudaMemcpyHostToDevice));
this->parameter_memory_ = std::unique_ptr<CudaCachedArray>(std::move(array));
}
// Forward pass. PAD_CONSTANT launches a single pad_forward kernel; the ndim
// switch selects a template instantiation with a compile-time dimension
// count (unrolled) for ndim 1..4 and the runtime fallback otherwise.
// PAD_REFLECT first builds an index map (init_index_map, then one
// pad_index_map launch per axis, outermost-last, because padded entries
// read index values produced by earlier launches) and then gathers with it.
template <typename T>
void PadCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
Variable &x_var = *inputs[0];
Variable &y_var = *outputs[0];
const auto y_size = y_var.size();
const auto ndim = this->padding_.size();
auto x = x_var.get_data_pointer<Tcu>(this->ctx_);
auto y = y_var.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto threads = 128;
auto blocks = cuda_get_blocks_by_size(y_var.size());
// Dynamic shared memory holds the AxisParam table inside the kernels.
auto shared = this->parameter_memory_->size();
auto params = this->parameter_memory_->template pointer<AxisParam>();
if (this->pad_mode_ == this->PAD_CONSTANT) {
using pad_constant_impl::pad_forward;
auto cvalue = this->constant_value_;
void (*kernel)(const Index_t, const Tcu *, Tcu *, const int,
const AxisParam *, const Tcu);
if (ndim == 1) {
kernel = pad_forward<Tcu, 1>;
} else if (ndim == 2) {
kernel = pad_forward<Tcu, 2>;
} else if (ndim == 3) {
kernel = pad_forward<Tcu, 3>;
} else if (ndim == 4) {
kernel = pad_forward<Tcu, 4>;
} else {
kernel = pad_forward<Tcu>;
}
kernel<<<blocks, threads, shared>>>(y_size, x, y, ndim, params, cvalue);
NBLA_CUDA_KERNEL_CHECK();
}
else if (this->pad_mode_ == this->PAD_REFLECT) {
using namespace pad_reflect_impl;
Variable &idx_map = this->index_map_;
auto idx = idx_map.cast_data_and_get_pointer<Index_t>(this->ctx_, false);
void (*kernel)(const Index_t, Index_t *, const int, const AxisParam *);
if (ndim == 1) {
kernel = init_index_map<1>;
} else if (ndim == 2) {
kernel = init_index_map<2>;
} else if (ndim == 3) {
kernel = init_index_map<3>;
} else if (ndim == 4) {
kernel = init_index_map<4>;
} else {
kernel = init_index_map<>;
}
kernel<<<blocks, threads, shared>>>(y_size, idx, ndim, params);
NBLA_CUDA_KERNEL_CHECK();
// Padding the index map must be done with individual kernel
// launches to synchronize index values which become source of
// padding for the next outer axis.
for (int axis = ndim - 1; axis >= 0; axis--) {
auto kernel = pad_index_map;
kernel<<<blocks, threads, shared>>>(y_size, idx, ndim, axis, params);
NBLA_CUDA_KERNEL_CHECK();
}
// Perform y[i] = x[idx[i]] for all i in y_size
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(pad_forward, y_size, x, y, idx);
}
}
// Backward pass. PAD_CONSTANT picks an accumulate/overwrite template
// instantiation (again specialized for ndim 1..4). PAD_REFLECT zeroes the
// input gradient when not accumulating and then scatter-adds through the
// index map computed during forward_impl.
template <typename T>
void PadCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum_gradient) {
if (propagate_down[0]) {
cuda_set_device(this->device_);
auto accum = accum_gradient[0];
Variable &x_var = *inputs[0];
Variable &y_var = *outputs[0];
const auto ndim = this->padding_.size();
auto dy = y_var.get_grad_pointer<Tcu>(this->ctx_);
if (this->pad_mode_ == this->PAD_CONSTANT) {
using namespace pad_constant_impl;
auto dx = x_var.cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum);
auto threads = 128;
auto blocks = cuda_get_blocks_by_size(y_var.size());
auto shared = this->parameter_memory_->size();
auto params = this->parameter_memory_->template pointer<AxisParam>();
void (*kernel)(const Index_t, const Tcu *, Tcu *, const int,
const AxisParam *);
if (ndim == 1) {
kernel = accum ? pad_backward<Tcu, 1, true> : pad_backward<Tcu, 1>;
} else if (ndim == 2) {
kernel = accum ? pad_backward<Tcu, 2, true> : pad_backward<Tcu, 2>;
} else if (ndim == 3) {
kernel = accum ? pad_backward<Tcu, 3, true> : pad_backward<Tcu, 3>;
} else if (ndim == 4) {
kernel = accum ? pad_backward<Tcu, 4, true> : pad_backward<Tcu, 4>;
} else {
kernel = accum ? pad_backward<Tcu, 0, true> : pad_backward<Tcu>;
}
kernel<<<blocks, threads, shared>>>(y_var.size(), dy, dx, ndim, params);
NBLA_CUDA_KERNEL_CHECK();
}
else if (this->pad_mode_ == this->PAD_REFLECT) {
// Scatter-add requires a zeroed destination unless accumulating.
if (!accum) {
x_var.grad()->zero();
}
Variable &idx_map = this->index_map_;
auto idx = idx_map.get_data_pointer<Index_t>(this->ctx_);
auto dx = x_var.cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
auto backward = pad_reflect_impl::pad_backward<Tcu>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward, y_var.size(), dy, dx, idx);
}
}
}
} // namespace nbla
|
9be27fbb411e65e833ab5680092afe9483c25bf6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
// Write each thread's index into v: v[tid] = tid.
// Intended launch: a single block of `size` threads. Added a bounds guard
// so launches with more threads than elements cannot write out of bounds
// (the original ignored `size` and wrote unconditionally).
__global__ void fill(int * v,std::size_t size){
// Get the id of the thread (0 -> size-1)
auto tid = threadIdx.x;
if (tid < size) {
v[tid] = tid;
}
}
// Demo: fill a device buffer with 0..99 and print it.
int main(){
std::vector<int> v(100);
int * v_d = nullptr;
// Allocate on the device; bail out early on failure instead of
// launching a kernel on a null pointer.
if (hipMalloc(&v_d, v.size()*sizeof(int)) != hipSuccess) {
std::cout << "hipMalloc failed" << "\n";
return 1;
}
hipLaunchKernelGGL(( fill), dim3(1),dim3(100), 0, 0, v_d, v.size());
hipMemcpy(v.data(),v_d,v.size() * sizeof(int), hipMemcpyDeviceToHost);
// Release device memory (the original leaked it).
hipFree(v_d);
for(auto x: v){
std::cout<< x <<" ";
}
}
| 9be27fbb411e65e833ab5680092afe9483c25bf6.cu | #include <iostream>
#include <vector>
#include <cuda_runtime.h>
// Write each thread's index into v: v[tid] = tid.
// Intended launch: a single block of `size` threads. Added a bounds guard
// so launches with more threads than elements cannot write out of bounds
// (the original ignored `size` and wrote unconditionally).
__global__ void fill(int * v,std::size_t size){
// Get the id of the thread (0 -> size-1)
auto tid = threadIdx.x;
if (tid < size) {
v[tid] = tid;
}
}
// Demo: fill a device buffer with 0..99 and print it.
int main(){
std::vector<int> v(100);
int * v_d = nullptr;
// Allocate on the device; bail out early on failure instead of
// launching a kernel on a null pointer.
if (cudaMalloc(&v_d, v.size()*sizeof(int)) != cudaSuccess) {
std::cout << "cudaMalloc failed" << "\n";
return 1;
}
fill<<<1,100>>>(v_d, v.size());
cudaMemcpy(v.data(),v_d,v.size() * sizeof(int), cudaMemcpyDeviceToHost);
// Release device memory (the original leaked it).
cudaFree(v_d);
for(auto x: v){
std::cout<< x <<" ";
}
}
|
58c143f9c0712c89dbd2d4454cdbc413f65f598a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "VoxelCopyToVAO.cuh"
//#include "COpenGLTypes.h"
//#include "CVoxelFunctions.cuh"
//#include "CSVOTypes.h"
//#include <cstdio>
//#include <cassert>
//#include "GIVoxelPages.h"
//
//__global__ void VoxCountPage(int& totalVox,
//
// const CVoxelPage* gVoxPages,
// const CVoxelGrid& gGridInfo,
// const uint32_t pageCount)
//{
// unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int pageId = globalId / GIVoxelPages::PageSize;
// unsigned int pageLocalId = (globalId - pageId * GIVoxelPages::PageSize);
//
// // All one normal means invalid voxel
// if(gVoxPages[pageId].dGridVoxPos[pageLocalId] != 0xFFFFFFFF)
// atomicAdd(&totalVox, 1);
//}
//
//__global__ void VoxCpyPage(// Two ogl Buffers for rendering used voxels
// CVoxelNormPos* voxelNormPosData,
// uchar4* voxelColorData,
// unsigned int& atomicIndex,
// const unsigned int maxBufferSize,
//
// // Per Obj Segment
// ushort2** gObjectAllocLocations,
//
// // Per obj
// unsigned int** gObjectAllocIndexLookup,
//
// // Per vox
// CVoxelAlbedo** gVoxelRenderData,
//
// // Page
// const CVoxelPage* gVoxPages,
// uint32_t pageCount,
// const CVoxelGrid& gGridInfo)
//{
// unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int pageId = blockIdx.x / GIVoxelPages::BlockPerPage;
// unsigned int pageLocalId = globalId - (pageId * GIVoxelPages::PageSize);
// unsigned int pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize;
// unsigned int segmentLocalVoxId = pageLocalId % GIVoxelPages::SegmentSize;
//
// // Skip whole segment if necessary
// if(ExpandOnlyOccupation(gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId].packed) == SegmentOccupation::EMPTY) return;
// assert(ExpandOnlyOccupation(gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId].packed) != SegmentOccupation::MARKED_FOR_CLEAR);
//
// // Data Read
// CVoxelPos voxPosPacked = gVoxPages[pageId].dGridVoxPos[pageLocalId];
//
// // All one normal means invalid voxel
// if(voxPosPacked != 0xFFFFFFFF)
// {
// CVoxelPos voxNormpacked = gVoxPages[pageId].dGridVoxNorm[pageLocalId];
//
// unsigned int index = atomicInc(&atomicIndex, 0xFFFFFFFF);
// assert(index < maxBufferSize);
//
// // Fetch obj Id to get color
// // ObjId Fetch
// ushort2 objectId;
// SegmentObjData objData = gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId];
// objectId.x = objData.objId;
// objectId.y = objData.batchId;
// unsigned int cacheVoxelId = objData.voxStride + segmentLocalVoxId;
//
// voxelNormPosData[index] = uint2{voxPosPacked, voxNormpacked};
// voxelColorData[index] = gVoxelRenderData[objectId.y][cacheVoxelId];
// }
//}
//
| 58c143f9c0712c89dbd2d4454cdbc413f65f598a.cu | //#include "VoxelCopyToVAO.cuh"
//#include "COpenGLTypes.h"
//#include "CVoxelFunctions.cuh"
//#include "CSVOTypes.h"
//#include <cstdio>
//#include <cassert>
//#include "GIVoxelPages.h"
//
//__global__ void VoxCountPage(int& totalVox,
//
// const CVoxelPage* gVoxPages,
// const CVoxelGrid& gGridInfo,
// const uint32_t pageCount)
//{
// unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int pageId = globalId / GIVoxelPages::PageSize;
// unsigned int pageLocalId = (globalId - pageId * GIVoxelPages::PageSize);
//
// // All one normal means invalid voxel
// if(gVoxPages[pageId].dGridVoxPos[pageLocalId] != 0xFFFFFFFF)
// atomicAdd(&totalVox, 1);
//}
//
//__global__ void VoxCpyPage(// Two ogl Buffers for rendering used voxels
// CVoxelNormPos* voxelNormPosData,
// uchar4* voxelColorData,
// unsigned int& atomicIndex,
// const unsigned int maxBufferSize,
//
// // Per Obj Segment
// ushort2** gObjectAllocLocations,
//
// // Per obj
// unsigned int** gObjectAllocIndexLookup,
//
// // Per vox
// CVoxelAlbedo** gVoxelRenderData,
//
// // Page
// const CVoxelPage* gVoxPages,
// uint32_t pageCount,
// const CVoxelGrid& gGridInfo)
//{
// unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int pageId = blockIdx.x / GIVoxelPages::BlockPerPage;
// unsigned int pageLocalId = globalId - (pageId * GIVoxelPages::PageSize);
// unsigned int pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize;
// unsigned int segmentLocalVoxId = pageLocalId % GIVoxelPages::SegmentSize;
//
// // Skip whole segment if necessary
// if(ExpandOnlyOccupation(gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId].packed) == SegmentOccupation::EMPTY) return;
// assert(ExpandOnlyOccupation(gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId].packed) != SegmentOccupation::MARKED_FOR_CLEAR);
//
// // Data Read
// CVoxelPos voxPosPacked = gVoxPages[pageId].dGridVoxPos[pageLocalId];
//
// // All one normal means invalid voxel
// if(voxPosPacked != 0xFFFFFFFF)
// {
// CVoxelPos voxNormpacked = gVoxPages[pageId].dGridVoxNorm[pageLocalId];
//
// unsigned int index = atomicInc(&atomicIndex, 0xFFFFFFFF);
// assert(index < maxBufferSize);
//
// // Fetch obj Id to get color
// // ObjId Fetch
// ushort2 objectId;
// SegmentObjData objData = gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId];
// objectId.x = objData.objId;
// objectId.y = objData.batchId;
// unsigned int cacheVoxelId = objData.voxStride + segmentLocalVoxId;
//
// voxelNormPosData[index] = uint2{voxPosPacked, voxNormpacked};
// voxelColorData[index] = gVoxelRenderData[objectId.y][cacheVoxelId];
// }
//}
//
|
8ced932c915213ae30b31e29febba5cd720aa359.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/Dispatch.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at { namespace native {
// We manually overload angle because std::arg does not work with types other than c10::complex.
// Real overload: angle is 0 for non-negative values, pi for negative values;
// NaN inputs pass through unchanged.
template<typename scalar_t>
__host__ __device__ static inline scalar_t angle_wrapper(scalar_t v) {
if (at::_isnan(v)){
return v;
}
return v < 0 ? M_PI : 0;
}
// Complex overload: delegate to std::arg (works with c10::complex).
template<typename T>
__host__ __device__ static inline c10::complex<T> angle_wrapper(c10::complex<T> v) {
return std::arg(v);
}
// Elementwise angle over the iterator, dispatched across floating and
// complex dtypes.
void angle_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.common_dtype(), "angle_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return angle_wrapper(a);
});
});
}
// We manually overload real because std::real does not work types other than c10::complex.
// Real overload: the real part of a real number is the number itself.
template<typename scalar_t>
__host__ __device__ static inline scalar_t real_wrapper(scalar_t v) {
return v;
}
// Complex overload: take the real component.
template<typename T>
__host__ __device__ static inline c10::complex<T> real_wrapper(c10::complex<T> v) {
return v.real();
}
// Elementwise real over the iterator, dispatched across all (incl. complex)
// dtypes.
void real_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "real_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return real_wrapper(a);
});
});
}
// We manually overload imag because std::imag does not work types other than c10::complex.
// Real overload: the imaginary part of a real number is always 0.
template<typename scalar_t>
__host__ __device__ static inline scalar_t imag_wrapper(scalar_t v) {
return 0;
}
// Complex overload: take the imaginary component.
template<typename T>
__host__ __device__ static inline c10::complex<T> imag_wrapper(c10::complex<T> v) {
return v.imag();
}
// Elementwise imag over the iterator, dispatched across all (incl. complex)
// dtypes.
void imag_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "imag_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return imag_wrapper(a);
});
});
}
// We manually overload conj because std::conj does not work types other than c10::complex.
// Real overload: conjugation is the identity for real numbers.
template<typename scalar_t>
__host__ __device__ static inline scalar_t conj_wrapper(scalar_t v) {
return v;
}
// Complex overload: delegate to std::conj.
template<typename T>
__host__ __device__ static inline c10::complex<T> conj_wrapper(c10::complex<T> v) {
return std::conj(v);
}
// NB: Ignores the negative bit on tensors
void conj_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kBFloat16, kHalf, iter.common_dtype(), "conj_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return conj_wrapper(a);
});
});
}
// Register the CUDA implementations with the per-op dispatch stubs.
REGISTER_DISPATCH(angle_stub, &angle_kernel_cuda);
REGISTER_DISPATCH(real_stub, &real_kernel_cuda);
REGISTER_DISPATCH(imag_stub, &imag_kernel_cuda);
REGISTER_DISPATCH(conj_physical_stub, &conj_kernel_cuda);
}} // namespace at::native
| 8ced932c915213ae30b31e29febba5cd720aa359.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/Dispatch.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at { namespace native {
// We manually overload angle because std::arg does not work with types other than c10::complex.
// Real overload: angle is 0 for non-negative values, pi for negative values;
// NaN inputs pass through unchanged.
template<typename scalar_t>
__host__ __device__ static inline scalar_t angle_wrapper(scalar_t v) {
if (at::_isnan(v)){
return v;
}
return v < 0 ? M_PI : 0;
}
// Complex overload: delegate to std::arg (works with c10::complex).
template<typename T>
__host__ __device__ static inline c10::complex<T> angle_wrapper(c10::complex<T> v) {
return std::arg(v);
}
// Elementwise angle over the iterator, dispatched across floating and
// complex dtypes.
void angle_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.common_dtype(), "angle_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return angle_wrapper(a);
});
});
}
// We manually overload real because std::real does not work types other than c10::complex.
// Real overload: the real part of a real number is the number itself.
template<typename scalar_t>
__host__ __device__ static inline scalar_t real_wrapper(scalar_t v) {
return v;
}
// Complex overload: take the real component.
template<typename T>
__host__ __device__ static inline c10::complex<T> real_wrapper(c10::complex<T> v) {
return v.real();
}
// Elementwise real over the iterator, dispatched across all (incl. complex)
// dtypes.
void real_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "real_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return real_wrapper(a);
});
});
}
// We manually overload imag because std::imag does not work types other than c10::complex.
// Real overload: the imaginary part of a real number is always 0.
template<typename scalar_t>
__host__ __device__ static inline scalar_t imag_wrapper(scalar_t v) {
return 0;
}
// Complex overload: take the imaginary component.
template<typename T>
__host__ __device__ static inline c10::complex<T> imag_wrapper(c10::complex<T> v) {
return v.imag();
}
// Elementwise imag over the iterator, dispatched across all (incl. complex)
// dtypes.
void imag_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "imag_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return imag_wrapper(a);
});
});
}
// We manually overload conj because std::conj does not work types other than c10::complex.
// Real overload: conjugation is the identity for real numbers.
template<typename scalar_t>
__host__ __device__ static inline scalar_t conj_wrapper(scalar_t v) {
return v;
}
// Complex overload: delegate to std::conj.
template<typename T>
__host__ __device__ static inline c10::complex<T> conj_wrapper(c10::complex<T> v) {
return std::conj(v);
}
// NB: Ignores the negative bit on tensors
void conj_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kBFloat16, kHalf, iter.common_dtype(), "conj_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return conj_wrapper(a);
});
});
}
// Register the CUDA implementations with the per-op dispatch stubs.
REGISTER_DISPATCH(angle_stub, &angle_kernel_cuda);
REGISTER_DISPATCH(real_stub, &real_kernel_cuda);
REGISTER_DISPATCH(imag_stub, &imag_kernel_cuda);
REGISTER_DISPATCH(conj_physical_stub, &conj_kernel_cuda);
}} // namespace at::native
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.