hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
83eab352f7b36f83172b5dbf1a293574ccdded08.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Matriz_GPU_Mult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Matriz_GPU_Mult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Matriz_GPU_Mult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Matriz_GPU_Mult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 83eab352f7b36f83172b5dbf1a293574ccdded08.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Matriz_GPU_Mult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Matriz_GPU_Mult<<<gridBlock,threadBlock>>>(a,b,c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Matriz_GPU_Mult<<<gridBlock,threadBlock>>>(a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Matriz_GPU_Mult<<<gridBlock,threadBlock>>>(a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7d7a5b57bb0d6ff164aed03c0b01dee2b57403b7.hip | // !!! This is a file automatically generated by hipify!!!
#include <complex>
//#define complex _Complex
#include <fftw3.h>
#include "polynomials.h"
#include "lagrangehalfc_impl.h"
#include <cassert>
#include <cmath>
#include "cudaFFTTest.h"
#include <iostream>
#include <inttypes.h>
#include <stdio.h>
#include <cstdint>
using namespace std;
typedef std::complex<double> cplx;
#define BIT_16 16
#define BIT_32 32
//global
/*
cudaFFTProcessorTest cudaFFTProcessorTestTest_16(16);
cudaFFTProcessorTest cudaFFTProcessorTestTest_32(32);
cudaFFTProcessorTest_2 cudaFFTProcessorTestTest_2_16(16);
cudaFFTProcessorTest_2 cudaFFTProcessorTestTest_2_32(32);
cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_4(1024, 16, 1024);//for 4 bit//coalescing
cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_8(1024, 32, 1024);//for 8 bit//coalescing
cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_32(1024, 128, 1024);//for 32 bit//coalescing
*/
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_1(1024, 4, 1024);//for 1 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_2(1024, 8, 1024);//for 1 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_3(1024, 12, 1024);//for 1 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_4(1024, 16, 1024);//for 4 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_6(1024, 24, 1024);//for 6 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_8(1024, 32, 1024);//for 8 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_12(1024, 48, 1024);//for 12 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_16(1024, 64, 1024);//for 16 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_24(1024, 96, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_32(1024, 128, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_48(1024, 192, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_64(1024, 256, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_80(1024, 320, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_128(1024, 512, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_256(1024, 1024, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_512(1024, 2048, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_1024(1024, 4096, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_2048(1024, 8192, 1024);
//int nBits = 24;
//n bit single gate testing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_bBitsTesting(1024, nBits * 4, 1024);//for nBit bit//coalescing
//int nOuts = 2;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_1(1024, nOuts, 1, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_1_1(1024, 1, 1, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_2(1024, nOuts, 2, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_4(1024, nOuts, 4, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_6(1024, nOuts, 6, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_8(1024, nOuts, 8, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16(1024, nOuts, 16, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_1_16(1024, 1, 16, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_24(1024, nOuts, 24, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_32(1024, nOuts, 32, 4, 1024, 2);//for 32 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_bBitsTesting_2_output(1024, nOuts, 24, 4, 1024, 2);//(1024, nBits * nOuts * 4, 1024);//for nBit bit//coalescing
//for vector operations
//int vLen = 8;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_8(1024, nOuts, 8, BIT_16, 4, 1024, 4);//8 numbers each 16 bit//coalescing with 2 output
//// vLen = 4;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4(1024, nOuts, 4, BIT_16, 4, 1024, 4);//4 numbers each 16 bit//coalescing with 2 output
////vLen = 2;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2(1024, nOuts, 2, BIT_16, 4, 1024, 4);//2 numbers each 16 bit//coalescing with 2 output
////vLen = 1;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1(1024, nOuts, 1, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 32;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_32(1024, nOuts, 32, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 16;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_16(1024, nOuts, 16, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 16;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_256(1024, nOuts, 256, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
//vLen = 16
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_16_32(1024, nOuts, 16, BIT_32, 4, 1024, 4);//16 numbers each 32 bit//coalescing with 2 output
////vLen = 8
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_8_32(1024, nOuts, 8, BIT_32, 4, 1024, 4);//8 numbers each 32 bit//coalescing with 2 output
////vLen = 4
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4_32(1024, nOuts, 4, BIT_32, 4, 1024, 4);//4 numbers each 32 bit//coalescing with 2 output
////vLen = 2
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2_32(1024, nOuts, 2, BIT_32, 4, 1024, 4);//2 numbers each 32 bit//coalescing with 2 output
////vLen = 1
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1_32(1024, nOuts, 1, BIT_32, 4, 1024, 4);//1 numbers each 32 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_p5_32(1024, 128, 1024);//0.5 numbers each 32 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_p25_32(1024, 64, 1024);//0.25 numbers each 32 bit//coalescing with 2 output
//nbits = 8
//vLen = 4;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4_8(1024, nOuts, 4, 8, 4, 1024, 4);//4 numbers each 16 bit//coalescing with 2 output
////vLen = 2;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2_8(1024, nOuts, 2, 8, 4, 1024, 4);//2 numbers each 16 bit//coalescing with 2 output
////vLen = 1;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1_8(1024, nOuts, 1, 8, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
//
////nbits = 1
////vLen = 16
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_16_1(1024, nOuts, 16, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 8
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_8_1(1024, nOuts, 8, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 4
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4_1(1024, nOuts, 4, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 2
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2_1(1024, nOuts, 2, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 1
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1_1(1024, nOuts, 1, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 24
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_24_1(1024, nOuts, 24, 1, 4, 1024, 4);
////vLen = 12
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_12_1(1024, nOuts, 12, 1, 4, 1024, 4);
////vLen = 6
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_6_1(1024, nOuts, 6, 1, 4, 1024, 4);
////vLen = 3
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_3_1(1024, nOuts, 3, 1, 4, 1024, 4);
////vLen = 32
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_32_1(1024, nOuts, 32, 1, 4, 1024, 4);
////vLen = 64
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_64_1(1024, nOuts, 64, 1, 4, 1024, 4);
////vLen = 128
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_128_1(1024, nOuts, 128, 1, 4, 1024, 4);
////vLen = 256
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_256_1(1024, nOuts, 256, 1, 4, 1024, 4);
////vLen = 512
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_512_1(1024, nOuts, 512, 1, 4, 1024, 4);
//vLen = 4096
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4096_1(1024, nOuts, 4096, 1, 4, 1024, 4);
FFT_Processor_fftw::FFT_Processor_fftw(const int N): _2N(2*N),N(N),Ns2(N/2) {
rev_in = (double*) malloc(sizeof(double) * _2N);
out = (double*) malloc(sizeof(double) * _2N);
rev_out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * (N+1));
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * (N+1));
rev_p = fftw_plan_dft_r2c_1d(_2N, rev_in, rev_out, FFTW_ESTIMATE);
p = fftw_plan_dft_c2r_1d(_2N, in, out, FFTW_ESTIMATE);
omegaxminus1 = new cplx[_2N];
for (int x=0; x<_2N; x++) {
omegaxminus1[x]=cplx(cos(x*M_PI/N)-1.,-sin(x*M_PI/N)); // instead of cos(x*M_PI/N)-1. + sin(x*M_PI/N) * I
//exp(i.x.pi/N)-1
}
}
void FFT_Processor_fftw::execute_reverse_int(cplx* res, const int* a) {
cplx* rev_out_cplx = (cplx*) rev_out; //fftw_complex and cplx are layout-compatible
for (int i=0; i<N; i++) rev_in[i]=a[i]/2.;
for (int i=0; i<N; i++) rev_in[N+i]=-rev_in[i];
fftw_execute(rev_p);
for (int i=0; i<Ns2; i++) res[i]=rev_out_cplx[2*i+1];
for (int i=0; i<=Ns2; i++) assert(abs(rev_out_cplx[2*i])<1e-20);
}
void FFT_Processor_fftw::execute_reverse_torus32(cplx* res, const Torus32* a) {
static const double _2pm33 = 1./double(INT64_C(1)<<33);
int32_t* aa = (int32_t*) a;
cplx* rev_out_cplx = (cplx*) rev_out; //fftw_complex and cplx are layout-compatible
for (int i=0; i<N; i++) rev_in[i]=aa[i]*_2pm33;
for (int i=0; i<N; i++) rev_in[N+i]=-rev_in[i];
fftw_execute(rev_p);
for (int i=0; i<Ns2; i++) res[i]=rev_out_cplx[2*i+1];
for (int i=0; i<=Ns2; i++) assert(abs(rev_out_cplx[2*i])<1e-20);
}
void FFT_Processor_fftw::execute_direct_Torus32(Torus32* res, const cplx* a) {
static const double _2p32 = double(INT64_C(1)<<32);
static const double _1sN = double(1)/double(N);
cplx* in_cplx = (cplx*) in; //fftw_complex and cplx are layout-compatible
for (int i=0; i<=Ns2; i++) in_cplx[2*i]=0;
for (int i=0; i<Ns2; i++) in_cplx[2*i+1]=a[i];
fftw_execute(p);
for (int i=0; i<N; i++) {
res[i]=Torus32(int64_t(out[i] * _1sN * _2p32));//
}
//pas besoin du fmod... Torus32(int64_t(fmod(rev_out[i]*_1sN,1.)*_2p32));
for (int i=0; i<N; i++) assert(fabs(out[N+i]+out[i])<1e-20);
}
FFT_Processor_fftw::~FFT_Processor_fftw() {
fftw_destroy_plan(p);
fftw_destroy_plan(rev_p);
fftw_free(in); fftw_free(rev_out);
free(rev_in); free(out);
delete[] omegaxminus1;
}
/**
* FFT functions
*/
EXPORT void IntPolynomial_ifft(LagrangeHalfCPolynomial* result, const IntPolynomial* p) {
fp1024_fftw.execute_reverse_int(((LagrangeHalfCPolynomial_IMPL*)result)->coefsC, p->coefs);
}
EXPORT void TorusPolynomial_ifft(LagrangeHalfCPolynomial* result, const TorusPolynomial* p) {
fp1024_fftw.execute_reverse_torus32(((LagrangeHalfCPolynomial_IMPL*)result)->coefsC, p->coefsT);
}
EXPORT void TorusPolynomial_fft(TorusPolynomial* result, const LagrangeHalfCPolynomial* p) {
fp1024_fftw.execute_direct_Torus32(result->coefsT, ((LagrangeHalfCPolynomial_IMPL*)p)->coefsC);
}
EXPORT void IntPolynomial_ifft_16(hipfftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
cout << "IntPolynomial_ifft_16" << endl;
/*
if (bitSize == 16) {
cudaFFTProcessorTestTest_16.execute_reverse_int(result, p->coefs);
} else if(bitSize == 32) {
cudaFFTProcessorTestTest_32.execute_reverse_int(result, p->coefs);
}
*/
// hipDeviceSynchronize();
}
EXPORT void IntPolynomial_ifft_16_Coalesce(hipfftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
// if(bitSize == 16){
// cudaFFTProcessorTestTest_general_16.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 32) {
// cudaFFTProcessorTestTest_general_32.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 1) {
// cudaFFTProcessorTestTest_general_1.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 8) {
// cudaFFTProcessorTestTest_general_8.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 4) {
// cudaFFTProcessorTestTest_general_4.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 2) {
// cudaFFTProcessorTestTest_general_2.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 48) {
// cudaFFTProcessorTestTest_general_48.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 24) {
// cudaFFTProcessorTestTest_general_24.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 12) {
// cudaFFTProcessorTestTest_general_12.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 6) {
// cudaFFTProcessorTestTest_general_6.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 3) {
// cudaFFTProcessorTestTest_general_3.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 64) {
// cudaFFTProcessorTestTest_general_64.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 128) {
// cudaFFTProcessorTestTest_general_128.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 256) {
// cudaFFTProcessorTestTest_general_256.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 512) {
// cudaFFTProcessorTestTest_general_512.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 1024) {
// cudaFFTProcessorTestTest_general_1024.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 2048) {
// cudaFFTProcessorTestTest_general_2048.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 80) {
// cudaFFTProcessorTestTest_general_80.execute_reverse_int(result, p->coefs);
// } else {
cout << "IntPolynomial_ifft_16_Coalesce: " << bitSize << endl;
// }
// hipDeviceSynchronize();
}
EXPORT void IntPolynomial_ifft_16_2(hipfftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
cout << "IntPolynomial_ifft_16_2" << endl;
/*
if(bitSize == 16){
cudaFFTProcessorTestTest_2_16.execute_reverse_int(result, p->coefs);
} else if (bitSize == 32) {
cudaFFTProcessorTestTest_2_32.execute_reverse_int(result, p->coefs);
}*/
// hipDeviceSynchronize();
}
EXPORT void IntPolynomial_ifft_16_2_Coalesce(hipfftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
// cout << "IntPolynomial_ifft_16_2_Coalesce" << endl;
// if(bitSize == 16){
// cudaFFTProcessorTestTest_general_coal_2_16.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 32) {
// cudaFFTProcessorTestTest_general_coal_2_32.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 8) {
// cudaFFTProcessorTestTest_general_coal_2_8.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 1) {
// cudaFFTProcessorTestTest_general_coal_2_1.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 4) {
// cudaFFTProcessorTestTest_general_coal_2_4.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 24) {
// cudaFFTProcessorTestTest_general_coal_2_24.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 2) {
// cudaFFTProcessorTestTest_general_coal_2_2.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 6) {
// cudaFFTProcessorTestTest_general_coal_2_6.execute_reverse_int(result, p->coefs);
// } else {
cout << "IntPolynomial_ifft_16_2_Coalesce: " << bitSize << endl;
// }
// hipDeviceSynchronize();
}
//EXPORT void IntPolynomial_ifft_16_2_Coalesce_one_out(hipfftDoubleComplex* result, int nOutputs, int bitSize, IntPolynomial* p) {
// if(bitSize == 16){
// cudaFFTProcessorTestTest_general_coal_2_8.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 1) {
// cudaFFTProcessorTestTest_general_coal_1_1.execute_reverse_int(result, p->coefs);
// } else {
// cout << "IntPolynomial_ifft_16_2_Coalesce: " << bitSize << endl;
// }
//// else if (bitSize == 32) {
//// cudaFFTProcessorTestTest_general_coal_2_32.execute_reverse_int(result, p->coefs);
//// } else if (bitSize == 8) {
//// cudaFFTProcessorTestTest_general_coal_2_8.execute_reverse_int(result, p->coefs);
//
//// else if (bitSize == 4) {
//// cudaFFTProcessorTestTest_general_coal_2_4.execute_reverse_int(result, p->coefs);
//// } else if (bitSize == 24) {
//// cudaFFTProcessorTestTest_general_coal_2_24.execute_reverse_int(result, p->coefs);
//// } else if (bitSize == 2) {
//// cudaFFTProcessorTestTest_general_coal_2_2.execute_reverse_int(result, p->coefs);
//// }
//
//}
EXPORT void IntPolynomial_ifft_16_2_Coalesce_vector(hipfftDoubleComplex* result, int vLength, int bitSize, const IntPolynomial* p) {
// if (bitSize == 16) {
// if (vLength == 8) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_8.execute_reverse_int(result, p->coefs);
// } else if (vLength == 4) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_4.execute_reverse_int(result, p->coefs);
// } else if (vLength == 2) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_2.execute_reverse_int(result, p->coefs);
// } else if (vLength == 1) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 32) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_32.execute_reverse_int(result, p->coefs);
// } else if (vLength == 16) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_16.execute_reverse_int(result, p->coefs);
// } else if (vLength == 256) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_256.execute_reverse_int(result, p->coefs);
// } else {
// cout << "IntPolynomial_ifft_16_2_Coalesce_vector: " << bitSize << " " << vLength << endl;
// }
// } else if (bitSize == 8) {
// if (vLength == 4) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_4_8.execute_reverse_int(result, p->coefs);
// } else if (vLength == 2) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_2_8.execute_reverse_int(result, p->coefs);
// } else if (vLength == 1) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_1_8.execute_reverse_int(result, p->coefs);
// } else {
// cout << "IntPolynomial_ifft_16_2_Coalesce_vector: " << bitSize << " " << vLength << endl;
// }
// } else if (bitSize == 1) {
// if (vLength == 16) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_16_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 8) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_8_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 4) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_4_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 2) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_2_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 1) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_1_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 24) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_24_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 12) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_12_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 6) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_6_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 3) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_3_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 32) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_32_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 64) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_64_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 128) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_128_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 256) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_256_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 512) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_512_1.execute_reverse_int(result, p->coefs);
// } else if (vLength == 4096) {
// cudaFFTProcessorTestTest_general_coal_2_16_vector_4096_1.execute_reverse_int(result, p->coefs);
// } else {
// cout << "IntPolynomial_ifft_16_2_Coalesce_vector: " << bitSize << " " << vLength << endl;
// }
// } else {
cout << "IntPolynomial_ifft_16_2_Coalesce_vector: " << bitSize << " " << vLength << endl;
// }
// hipDeviceSynchronize();
}
EXPORT void TorusPolynomial_fft_16(TorusPolynomial* result, const LagrangeHalfCPolynomial* p, int startIndex,
int endIndex, int bitSize) {
cout << "TorusPolynomial_fft_16" << endl;
/*
if(bitSize == 16){
cudaFFTProcessorTestTest_16.execute_direct_Torus32(&(result->coefsT[startIndex]),
((LagrangeHalfCPolynomial_IMPL*)p)->coefsC);
} else if (bitSize == 32) {
cudaFFTProcessorTestTest_32.execute_direct_Torus32(&(result->coefsT[startIndex]),
((LagrangeHalfCPolynomial_IMPL*)p)->coefsC);
}*/
// hipDeviceSynchronize();
}
EXPORT void TorusPolynomial_fft_gpu(TorusPolynomial* result, hipfftDoubleComplex *source, int bitSize, int N, int Ns2) {
// if(bitSize == 16){
// cudaFFTProcessorTestTest_general_16.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if (bitSize == 32) {
// cudaFFTProcessorTestTest_general_32.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 1) {
// cudaFFTProcessorTestTest_general_1.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 8) {
// cudaFFTProcessorTestTest_general_8.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 4) {
// cudaFFTProcessorTestTest_general_4.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 2) {
// cudaFFTProcessorTestTest_general_2.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 48) {
// cudaFFTProcessorTestTest_general_48.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 24) {
// cudaFFTProcessorTestTest_general_24.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 12) {
// cudaFFTProcessorTestTest_general_12.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 6) {
// cudaFFTProcessorTestTest_general_6.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 3) {
// cudaFFTProcessorTestTest_general_3.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 64) {
// cudaFFTProcessorTestTest_general_64.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 80) {
// cudaFFTProcessorTestTest_general_80.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 128) {
// cudaFFTProcessorTestTest_general_128.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 256) {
// cudaFFTProcessorTestTest_general_256.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 512) {
// cudaFFTProcessorTestTest_general_512.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 1024) {
// cudaFFTProcessorTestTest_general_1024.execute_direct_Torus32_gpu(result->coefsT, source);
// } else if(bitSize == 2048) {
// cudaFFTProcessorTestTest_general_2048.execute_direct_Torus32_gpu(result->coefsT, source);
// } else {
cout << "TorusPolynomial_fft_gpu: " << bitSize << endl;
// }
// hipDeviceSynchronize();
}
EXPORT void TorusPolynomial_fft_gpu_2(Torus32 *result, hipfftDoubleComplex *source, int nOutputs, int bitSize,
int N, int Ns2) {
// cout << "bitSize: " << bitSize << endl;
// if (nOutputs == 2) {
// if (bitSize == 16) {
// cudaFFTProcessorTestTest_general_coal_2_16.execute_direct_Torus32_gpu(result, source);
// } else if (bitSize == 32) {
// cudaFFTProcessorTestTest_general_coal_2_32.execute_direct_Torus32_gpu(result, source);
// } else if (bitSize == 8) {
// cudaFFTProcessorTestTest_general_coal_2_8.execute_direct_Torus32_gpu(result, source);
// } else if (bitSize == 1) {
// cudaFFTProcessorTestTest_general_coal_2_1.execute_direct_Torus32_gpu(result, source);
// } else if (bitSize == 4) {
// cudaFFTProcessorTestTest_general_coal_2_4.execute_direct_Torus32_gpu(result, source);
// } else if (bitSize == 24) {
// cudaFFTProcessorTestTest_general_coal_2_24.execute_direct_Torus32_gpu(result, source);
// } else if (bitSize == 2) {
// cudaFFTProcessorTestTest_general_coal_2_2.execute_direct_Torus32_gpu(result, source);
// } else if (bitSize == 6) {
// cudaFFTProcessorTestTest_general_coal_2_6.execute_direct_Torus32_gpu(result, source);
// } else {
// cout << " TorusPolynomial_fft_gpu_2: in" << bitSize << " " << nOutputs << endl;
// }
// } else {
cout << " TorusPolynomial_fft_gpu_2: out" << bitSize << " " << nOutputs << endl;
// }
// hipDeviceSynchronize();
}
// Vector variant of the GPU direct FFT (complex spectrum -> Torus32 coefficients).
// The original (bitSize, vLength) dispatch to pre-built cudaFFTProcessorTest_general
// instances has been disabled upstream, so this function currently only logs its
// arguments; re-enable the processor globals before restoring the dispatch.
// NOTE: closing brace of this function lives on the following line.
EXPORT void TorusPolynomial_fft_gpu_16_2_Coalesce_vector(TorusPolynomial* result, hipfftDoubleComplex *source,
                                                         int vLength, int nOutputs, int bitSize, int N, int Ns2) {
    cout << "Outer: TorusPolynomial_fft_gpu_16_2_Coalesce_vector: " << bitSize << " " << vLength << " " << nOutputs << endl;
} | 7d7a5b57bb0d6ff164aed03c0b01dee2b57403b7.cu | #include <complex>
//#define complex _Complex
#include <fftw3.h>
#include "polynomials.h"
#include "lagrangehalfc_impl.h"
#include <cassert>
#include <cmath>
#include "cudaFFTTest.h"
#include <iostream>
#include <inttypes.h>
#include <stdio.h>
#include <cstdint>
using namespace std;
typedef std::complex<double> cplx;
#define BIT_16 16
#define BIT_32 32
//global
/*
cudaFFTProcessorTest cudaFFTProcessorTestTest_16(16);
cudaFFTProcessorTest cudaFFTProcessorTestTest_32(32);
cudaFFTProcessorTest_2 cudaFFTProcessorTestTest_2_16(16);
cudaFFTProcessorTest_2 cudaFFTProcessorTestTest_2_32(32);
cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_4(1024, 16, 1024);//for 4 bit//coalescing
cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_8(1024, 32, 1024);//for 8 bit//coalescing
cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_32(1024, 128, 1024);//for 32 bit//coalescing
*/
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_1(1024, 4, 1024);//for 1 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_2(1024, 8, 1024);//for 1 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_3(1024, 12, 1024);//for 1 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_4(1024, 16, 1024);//for 4 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_6(1024, 24, 1024);//for 6 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_8(1024, 32, 1024);//for 8 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_12(1024, 48, 1024);//for 12 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_16(1024, 64, 1024);//for 16 bit//coalescing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_24(1024, 96, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_32(1024, 128, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_48(1024, 192, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_64(1024, 256, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_80(1024, 320, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_128(1024, 512, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_256(1024, 1024, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_512(1024, 2048, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_1024(1024, 4096, 1024);
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_2048(1024, 8192, 1024);
//int nBits = 24;
//n bit single gate testing
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_bBitsTesting(1024, nBits * 4, 1024);//for nBit bit//coalescing
//int nOuts = 2;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_1(1024, nOuts, 1, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_1_1(1024, 1, 1, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_2(1024, nOuts, 2, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_4(1024, nOuts, 4, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_6(1024, nOuts, 6, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_8(1024, nOuts, 8, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16(1024, nOuts, 16, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_1_16(1024, 1, 16, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_24(1024, nOuts, 24, 4, 1024, 2);//for 16 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_32(1024, nOuts, 32, 4, 1024, 2);//for 32 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_bBitsTesting_2_output(1024, nOuts, 24, 4, 1024, 2);//(1024, nBits * nOuts * 4, 1024);//for nBit bit//coalescing
//for vector operations
//int vLen = 8;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_8(1024, nOuts, 8, BIT_16, 4, 1024, 4);//8 numbers each 16 bit//coalescing with 2 output
//// vLen = 4;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4(1024, nOuts, 4, BIT_16, 4, 1024, 4);//4 numbers each 16 bit//coalescing with 2 output
////vLen = 2;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2(1024, nOuts, 2, BIT_16, 4, 1024, 4);//2 numbers each 16 bit//coalescing with 2 output
////vLen = 1;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1(1024, nOuts, 1, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 32;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_32(1024, nOuts, 32, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 16;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_16(1024, nOuts, 16, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 16;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_256(1024, nOuts, 256, BIT_16, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
//vLen = 16
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_16_32(1024, nOuts, 16, BIT_32, 4, 1024, 4);//16 numbers each 32 bit//coalescing with 2 output
////vLen = 8
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_8_32(1024, nOuts, 8, BIT_32, 4, 1024, 4);//8 numbers each 32 bit//coalescing with 2 output
////vLen = 4
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4_32(1024, nOuts, 4, BIT_32, 4, 1024, 4);//4 numbers each 32 bit//coalescing with 2 output
////vLen = 2
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2_32(1024, nOuts, 2, BIT_32, 4, 1024, 4);//2 numbers each 32 bit//coalescing with 2 output
////vLen = 1
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1_32(1024, nOuts, 1, BIT_32, 4, 1024, 4);//1 numbers each 32 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_p5_32(1024, 128, 1024);//0.5 numbers each 32 bit//coalescing with 2 output
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_p25_32(1024, 64, 1024);//0.25 numbers each 32 bit//coalescing with 2 output
//nbits = 8
//vLen = 4;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4_8(1024, nOuts, 4, 8, 4, 1024, 4);//4 numbers each 16 bit//coalescing with 2 output
////vLen = 2;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2_8(1024, nOuts, 2, 8, 4, 1024, 4);//2 numbers each 16 bit//coalescing with 2 output
////vLen = 1;
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1_8(1024, nOuts, 1, 8, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
//
////nbits = 1
////vLen = 16
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_16_1(1024, nOuts, 16, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 8
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_8_1(1024, nOuts, 8, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 4
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4_1(1024, nOuts, 4, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 2
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_2_1(1024, nOuts, 2, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 1
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_1_1(1024, nOuts, 1, 1, 4, 1024, 4);//1 numbers each 16 bit//coalescing with 2 output
////vLen = 24
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_24_1(1024, nOuts, 24, 1, 4, 1024, 4);
////vLen = 12
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_12_1(1024, nOuts, 12, 1, 4, 1024, 4);
////vLen = 6
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_6_1(1024, nOuts, 6, 1, 4, 1024, 4);
////vLen = 3
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_3_1(1024, nOuts, 3, 1, 4, 1024, 4);
////vLen = 32
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_32_1(1024, nOuts, 32, 1, 4, 1024, 4);
////vLen = 64
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_64_1(1024, nOuts, 64, 1, 4, 1024, 4);
////vLen = 128
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_128_1(1024, nOuts, 128, 1, 4, 1024, 4);
////vLen = 256
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_256_1(1024, nOuts, 256, 1, 4, 1024, 4);
////vLen = 512
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_512_1(1024, nOuts, 512, 1, 4, 1024, 4);
//vLen = 4096
//cudaFFTProcessorTest_general cudaFFTProcessorTestTest_general_coal_2_16_vector_4096_1(1024, nOuts, 4096, 1, 4, 1024, 4);
// Builds the CPU FFT processor for polynomials of degree N: allocates the
// real/complex scratch buffers, creates the forward (r2c) and backward (c2r)
// FFTW plans over length 2N, and precomputes the omega^x - 1 factors.
FFT_Processor_fftw::FFT_Processor_fftw(const int N): _2N(2*N),N(N),Ns2(N/2) {
    // Real-side buffers have length 2N; complex r2c output needs N+1 bins.
    rev_in  = (double*) malloc(sizeof(double) * _2N);
    out     = (double*) malloc(sizeof(double) * _2N);
    rev_out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * (N+1));
    in      = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * (N+1));
    // One plan per direction, reused for every transform.
    rev_p = fftw_plan_dft_r2c_1d(_2N, rev_in, rev_out, FFTW_ESTIMATE);
    p     = fftw_plan_dft_c2r_1d(_2N, in, out, FFTW_ESTIMATE);
    // Table of cos(k*pi/N) - 1 - i*sin(k*pi/N) for k in [0, 2N).
    // Expressions kept in this exact form so the rounding matches prior builds.
    omegaxminus1 = new cplx[_2N];
    for (int k = 0; k < _2N; ++k) {
        omegaxminus1[k] = cplx(cos(k*M_PI/N)-1., -sin(k*M_PI/N));
    }
}
// Inverse transform of an integer polynomial into its Lagrange (complex) form.
// The input is halved and odd-extended over 2N samples; only odd spectrum
// bins are kept (even bins must vanish for an anti-periodic signal).
void FFT_Processor_fftw::execute_reverse_int(cplx* res, const int* a) {
    cplx* spectrum = (cplx*) rev_out;   // fftw_complex and cplx are layout-compatible
    // Fill both halves in one pass: rev_in[N+k] = -rev_in[k].
    for (int k = 0; k < N; ++k) {
        const double half = a[k] / 2.;
        rev_in[k]     = half;
        rev_in[N + k] = -half;
    }
    fftw_execute(rev_p);
    // Keep the odd bins; they carry all the information.
    for (int k = 0; k < Ns2; ++k) res[k] = spectrum[2*k+1];
    // Sanity: even bins are (numerically) zero by construction.
    for (int k = 0; k <= Ns2; ++k) assert(abs(spectrum[2*k]) < 1e-20);
}
// Inverse transform of a Torus32 polynomial into Lagrange (complex) form.
// Same scheme as execute_reverse_int, but inputs are rescaled by 2^-33
// (torus values interpreted as signed 32-bit fractions, then halved).
void FFT_Processor_fftw::execute_reverse_torus32(cplx* res, const Torus32* a) {
    static const double _2pm33 = 1./double(INT64_C(1)<<33);
    int32_t* raw = (int32_t*) a;
    cplx* spectrum = (cplx*) rev_out;   // fftw_complex and cplx are layout-compatible
    // Odd extension built in a single pass.
    for (int k = 0; k < N; ++k) {
        const double v = raw[k]*_2pm33;
        rev_in[k]     = v;
        rev_in[N + k] = -v;
    }
    fftw_execute(rev_p);
    for (int k = 0; k < Ns2; ++k) res[k] = spectrum[2*k+1];
    for (int k = 0; k <= Ns2; ++k) assert(abs(spectrum[2*k]) < 1e-20);
}
// Direct transform: Lagrange (complex) coefficients back to Torus32 values.
// Odd bins carry the payload, even bins are zeroed; the c2r output is
// rescaled by 2^32/(2N) and truncated into the torus.
void FFT_Processor_fftw::execute_direct_Torus32(Torus32* res, const cplx* a) {
    static const double _2p32 = double(INT64_C(1)<<32);
    static const double _1sN = double(1)/double(N);
    cplx* in_cplx = (cplx*) in;   // fftw_complex and cplx are layout-compatible
    // Interleaved fill in one pass; the final even bin (index N) is set after.
    for (int k = 0; k < Ns2; ++k) {
        in_cplx[2*k]     = 0;
        in_cplx[2*k + 1] = a[k];
    }
    in_cplx[2*Ns2] = 0;
    fftw_execute(p);
    // Multiplication order kept as-is to preserve the exact rounding behavior.
    for (int k = 0; k < N; ++k)
        res[k] = Torus32(int64_t(out[k] * _1sN * _2p32));
    // Anti-periodicity check: out[N+k] must equal -out[k] up to rounding.
    for (int k = 0; k < N; ++k) assert(fabs(out[N+k] + out[k]) < 1e-20);
}
// Tears the processor down: plans first, then the buffers they referenced,
// then the precomputed twiddle table.
FFT_Processor_fftw::~FFT_Processor_fftw() {
    fftw_destroy_plan(p);
    fftw_destroy_plan(rev_p);
    // fftw-allocated buffers go back through fftw_free.
    fftw_free(in);
    fftw_free(rev_out);
    // plain-malloc'd buffers through free.
    free(rev_in);
    free(out);
    delete[] omegaxminus1;
}
/**
* FFT functions
*/
// CPU inverse FFT of an integer polynomial, delegating to the global fftw
// processor (fp1024_fftw, declared elsewhere — presumably fixed to N=1024).
EXPORT void IntPolynomial_ifft(LagrangeHalfCPolynomial* result, const IntPolynomial* p) {
fp1024_fftw.execute_reverse_int(((LagrangeHalfCPolynomial_IMPL*)result)->coefsC, p->coefs);
}
// CPU inverse FFT of a torus polynomial via the global fftw processor.
EXPORT void TorusPolynomial_ifft(LagrangeHalfCPolynomial* result, const TorusPolynomial* p) {
fp1024_fftw.execute_reverse_torus32(((LagrangeHalfCPolynomial_IMPL*)result)->coefsC, p->coefsT);
}
// CPU direct FFT back to torus coefficients via the global fftw processor.
EXPORT void TorusPolynomial_fft(TorusPolynomial* result, const LagrangeHalfCPolynomial* p) {
fp1024_fftw.execute_direct_Torus32(result->coefsT, ((LagrangeHalfCPolynomial_IMPL*)p)->coefsC);
}
// GPU inverse FFT (16/32-bit variants). The dispatch to the global
// cudaFFTProcessorTest instances is disabled upstream, so this stub only
// logs the call; restore the processor globals before re-enabling.
EXPORT void IntPolynomial_ifft_16(cufftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
    cout << "IntPolynomial_ifft_16" << endl;
}
// Coalesced GPU inverse FFT. The per-bitSize dispatch (1..2048 bits) to the
// cudaFFTProcessorTest_general_* globals is disabled upstream, so every call
// currently falls through to the diagnostic log below.
EXPORT void IntPolynomial_ifft_16_Coalesce(cufftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
    cout << "IntPolynomial_ifft_16_Coalesce: " << bitSize << endl;
}
// Two-output GPU inverse FFT (16/32-bit variants). Dispatch to the
// cudaFFTProcessorTestTest_2_* globals is disabled upstream; log only.
EXPORT void IntPolynomial_ifft_16_2(cufftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
    cout << "IntPolynomial_ifft_16_2" << endl;
}
// Coalesced two-output GPU inverse FFT. The bitSize dispatch to the
// cudaFFTProcessorTestTest_general_coal_2_* globals is disabled upstream,
// so this stub only reports the (unhandled) bit size.
EXPORT void IntPolynomial_ifft_16_2_Coalesce(cufftDoubleComplex* result, int bitSize, const IntPolynomial* p) {
    cout << "IntPolynomial_ifft_16_2_Coalesce: " << bitSize << endl;
}
//EXPORT void IntPolynomial_ifft_16_2_Coalesce_one_out(cufftDoubleComplex* result, int nOutputs, int bitSize, IntPolynomial* p) {
// if(bitSize == 16){
// cudaFFTProcessorTestTest_general_coal_2_8.execute_reverse_int(result, p->coefs);
// } else if (bitSize == 1) {
// cudaFFTProcessorTestTest_general_coal_1_1.execute_reverse_int(result, p->coefs);
// } else {
// cout << "IntPolynomial_ifft_16_2_Coalesce: " << bitSize << endl;
// }
//// else if (bitSize == 32) {
//// cudaFFTProcessorTestTest_general_coal_2_32.execute_reverse_int(result, p->coefs);
//// } else if (bitSize == 8) {
//// cudaFFTProcessorTestTest_general_coal_2_8.execute_reverse_int(result, p->coefs);
//
//// else if (bitSize == 4) {
//// cudaFFTProcessorTestTest_general_coal_2_4.execute_reverse_int(result, p->coefs);
//// } else if (bitSize == 24) {
//// cudaFFTProcessorTestTest_general_coal_2_24.execute_reverse_int(result, p->coefs);
//// } else if (bitSize == 2) {
//// cudaFFTProcessorTestTest_general_coal_2_2.execute_reverse_int(result, p->coefs);
//// }
//
//}
// Vector variant of the coalesced two-output GPU inverse FFT. The nested
// (bitSize, vLength) dispatch to the *_vector_* processor globals is disabled
// upstream; this stub only reports the unhandled combination.
EXPORT void IntPolynomial_ifft_16_2_Coalesce_vector(cufftDoubleComplex* result, int vLength, int bitSize, const IntPolynomial* p) {
    cout << "IntPolynomial_ifft_16_2_Coalesce_vector: " << bitSize << " " << vLength << endl;
}
// GPU direct FFT writing into result->coefsT starting at startIndex.
// The 16/32-bit dispatch to the cudaFFTProcessorTest globals is disabled
// upstream; this stub only logs the call.
EXPORT void TorusPolynomial_fft_16(TorusPolynomial* result, const LagrangeHalfCPolynomial* p, int startIndex,
                                   int endIndex, int bitSize) {
    cout << "TorusPolynomial_fft_16" << endl;
}
// GPU direct FFT (spectrum -> Torus32 coefficients). The per-bitSize dispatch
// (1..2048) to the cudaFFTProcessorTestTest_general_* globals is disabled
// upstream; this stub only reports the unhandled bit size.
EXPORT void TorusPolynomial_fft_gpu(TorusPolynomial* result, cufftDoubleComplex *source, int bitSize, int N, int Ns2) {
    cout << "TorusPolynomial_fft_gpu: " << bitSize << endl;
}
// Two-output GPU direct FFT. The nOutputs==2 dispatch to the
// cudaFFTProcessorTestTest_general_coal_2_* globals is disabled upstream;
// this stub only reports the unhandled (bitSize, nOutputs) combination.
EXPORT void TorusPolynomial_fft_gpu_2(Torus32 *result, cufftDoubleComplex *source, int nOutputs, int bitSize,
                                      int N, int Ns2) {
    cout << " TorusPolynomial_fft_gpu_2: out" << bitSize << " " << nOutputs << endl;
}
// Vector variant of the two-output GPU direct FFT. The nested
// (bitSize, vLength) dispatch to the *_vector_* processor globals is disabled
// upstream; this stub only reports the unhandled combination.
// NOTE: closing brace of this function lives on the following line.
EXPORT void TorusPolynomial_fft_gpu_16_2_Coalesce_vector(TorusPolynomial* result, cufftDoubleComplex *source,
                                                         int vLength, int nOutputs, int bitSize, int N, int Ns2) {
    cout << "Outer: TorusPolynomial_fft_gpu_16_2_Coalesce_vector: " << bitSize << " " << vLength << " " << nOutputs << endl;
} |
833118bd92d645342c4305be177645136b591ea9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DFSPH_static_variables_structure_cuda.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
//Static Variable Structure kernels
__global__ void initCurand_kernel(hiprandState_t *state) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= 1) { return; }
hiprand_init(1234, 0, 0, state);
}
SVS_CU* SVS_CU::get(bool free) {
static SVS_CU* v = NULL;
if (free) {
if (v != NULL) {
delete v; v = NULL;
}
}
else {
if (v == NULL) {
v = new SVS_CU();
}
}
return v;
}
SVS_CU::SVS_CU() {
hipMallocManaged(&(avg_density_err), sizeof(RealCuda));
shuffle_index = NULL;
hipMalloc(&(curand_state), sizeof(hiprandState_t));
initCurand_kernel << <1, 1 >> > (curand_state);
gpuErrchk(hipDeviceSynchronize());
hipMallocManaged(&(count_rmv_particles), sizeof(int));
hipMallocManaged(&(count_possible_particles), sizeof(int));
hipMallocManaged(&(count_moved_particles), sizeof(int));
hipMallocManaged(&(count_invalid_position), sizeof(int));
hipMallocManaged(&(column_max_height), CELL_ROW_LENGTH*CELL_ROW_LENGTH * sizeof(RealCuda));
hipMallocManaged(&(tagged_particles_count), sizeof(int));
hipMallocManaged(&(count_created_particles), sizeof(int));
hipMallocManaged(&(force_cuda), sizeof(Vector3d));
hipMallocManaged(&(moment_cuda), sizeof(Vector3d));
hipMallocManaged(&(pt_cuda), sizeof(Vector3d));
}
SVS_CU::~SVS_CU() {
CUDA_FREE_PTR(avg_density_err);
CUDA_FREE_PTR(shuffle_index);
CUDA_FREE_PTR(curand_state);
CUDA_FREE_PTR(count_rmv_particles);
CUDA_FREE_PTR(count_possible_particles);
CUDA_FREE_PTR(count_moved_particles);
CUDA_FREE_PTR(count_invalid_position);
CUDA_FREE_PTR(column_max_height);
CUDA_FREE_PTR(tagged_particles_count);
CUDA_FREE_PTR(count_created_particles);
CUDA_FREE_PTR(force_cuda);
CUDA_FREE_PTR(moment_cuda);
CUDA_FREE_PTR(pt_cuda);
}
void SVS_CU::particleNumberChanged() {
CUDA_FREE_PTR(get()->shuffle_index);
} | 833118bd92d645342c4305be177645136b591ea9.cu | #include "DFSPH_static_variables_structure_cuda.h"
#include <curand.h>
#include <curand_kernel.h>
//Static Variable Structure kernels
__global__ void initCurand_kernel(curandState *state) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= 1) { return; }
curand_init(1234, 0, 0, state);
}
SVS_CU* SVS_CU::get(bool free) {
static SVS_CU* v = NULL;
if (free) {
if (v != NULL) {
delete v; v = NULL;
}
}
else {
if (v == NULL) {
v = new SVS_CU();
}
}
return v;
}
SVS_CU::SVS_CU() {
cudaMallocManaged(&(avg_density_err), sizeof(RealCuda));
shuffle_index = NULL;
cudaMalloc(&(curand_state), sizeof(curandState));
initCurand_kernel << <1, 1 >> > (curand_state);
gpuErrchk(cudaDeviceSynchronize());
cudaMallocManaged(&(count_rmv_particles), sizeof(int));
cudaMallocManaged(&(count_possible_particles), sizeof(int));
cudaMallocManaged(&(count_moved_particles), sizeof(int));
cudaMallocManaged(&(count_invalid_position), sizeof(int));
cudaMallocManaged(&(column_max_height), CELL_ROW_LENGTH*CELL_ROW_LENGTH * sizeof(RealCuda));
cudaMallocManaged(&(tagged_particles_count), sizeof(int));
cudaMallocManaged(&(count_created_particles), sizeof(int));
cudaMallocManaged(&(force_cuda), sizeof(Vector3d));
cudaMallocManaged(&(moment_cuda), sizeof(Vector3d));
cudaMallocManaged(&(pt_cuda), sizeof(Vector3d));
}
SVS_CU::~SVS_CU() {
CUDA_FREE_PTR(avg_density_err);
CUDA_FREE_PTR(shuffle_index);
CUDA_FREE_PTR(curand_state);
CUDA_FREE_PTR(count_rmv_particles);
CUDA_FREE_PTR(count_possible_particles);
CUDA_FREE_PTR(count_moved_particles);
CUDA_FREE_PTR(count_invalid_position);
CUDA_FREE_PTR(column_max_height);
CUDA_FREE_PTR(tagged_particles_count);
CUDA_FREE_PTR(count_created_particles);
CUDA_FREE_PTR(force_cuda);
CUDA_FREE_PTR(moment_cuda);
CUDA_FREE_PTR(pt_cuda);
}
void SVS_CU::particleNumberChanged() {
CUDA_FREE_PTR(get()->shuffle_index);
} |
072fd95427ad82bb45b741cddd6b14e8ca13dfe5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/TensorTransformations.h> // flip
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/core/Array.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/KernelUtils.cuh>
#include <c10/util/MaybeOwned.h>
#include <THH/THHTensorInfo.cuh>
namespace at { namespace native {
static constexpr int launch_bound2 = 4;
static constexpr int launch_size_nd = 128;
template<int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, launch_bound2)
__global__ void index_elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
int idx = nv * blockIdx.x + tid;
#pragma unroll
for (int i = 0; i < vt; i++) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( index_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename func_t>
void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) {
int num_indices = index_size.size();
AT_ASSERT(num_indices == index_stride.size());
AT_ASSERT(num_indices == iter.ntensors() - 2);
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
gpu_index_kernel(sub_iter, index_size, index_stride, f);
}
return;
}
auto sizes = at::detail::Array<int64_t, MAX_DIMS>(0);
auto strides = at::detail::Array<int64_t, MAX_DIMS>(0);
auto index_ptrs = at::detail::Array<char*, MAX_DIMS>(nullptr);
for (int i = 0; i < num_indices; i++) {
sizes[i] = index_size[i];
strides[i] = index_stride[i];
index_ptrs[i] = (char*)iter.data_ptr(i + 2);
}
char* out_ptr = (char*)iter.data_ptr(0);
char* in_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<3>(iter);
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) {
auto offsets = offset_calc.get(idx);
char* out_data = out_ptr + offsets[0];
char* in_data = in_ptr + offsets[1];
int64_t offset = 0;
#pragma unroll
for (int i = 0; i < num_indices; i++) {
int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]);
CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds");
if (index < 0) {
index += sizes[i];
}
offset += index * strides[i];
}
f(out_data, in_data, offset);
});
}
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
template <typename scalar_t>
void index_fill_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
scalar_t fill_val) {
if (0 == iter.numel()) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds");
if (idx < 0) {
idx += self_dim_size;
}
self_data[idx * self_dim_stride] = fill_val;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_copy_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds");
self_data[idx * self_dim_stride] = *source_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)out_data = *(scalar_t*)(in_data + offset);
});
}
template <typename scalar_t>
void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)(out_data + offset) = *(scalar_t*)in_data;
});
}
static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static void index_fill_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
const Scalar& source) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_fill_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto fill_val = source.to<scalar_t>();
auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val);
index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque);
});
}
static void index_copy_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
// See note [Writing Nondeterministic Operations]
// Nondeterministic when index contains duplicate entries
// this kernel will not be called when torch.use_deterministic_algorithms(True)
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_copy_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride);
});
}
static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_put_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) {
NoNamesGuard guard;
TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool,
"masked_select: expected BoolTensor or ByteTensor for mask");
TORCH_CHECK(self.scalar_type() == result.scalar_type(),
"masked_select(): self and result must have the same scalar type");
auto mask_temp = (mask.dim() == 0)
? c10::MaybeOwned<Tensor>::owned(mask.unsqueeze(0))
: c10::MaybeOwned<Tensor>::borrowed(mask);
auto self_temp = (self.dim() == 0)
? c10::MaybeOwned<Tensor>::owned(self.unsqueeze(0))
: c10::MaybeOwned<Tensor>::borrowed(self);
// Cannot reassign to mask_temp and self_temp here! if they are
// owning and expand_outplace returns a borrow, the returned borrow
// would dangle.
auto mask_self_expanded = expand_outplace(*mask_temp, *self_temp);
at::native::index_out(result, *std::get<1>(mask_self_expanded), c10::List<c10::optional<at::Tensor>>({*std::get<0>(std::move(mask_self_expanded))}));
return result;
}
Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) {
namedinference::compute_broadcast_outnames(self, mask);
Tensor result = at::empty({0}, self.options());
return masked_select_out_cuda_impl(result, self, mask);
}
Tensor & masked_select_out_cuda(const Tensor & self, const Tensor & mask, Tensor & result) {
namedinference::compute_broadcast_outnames(self, mask);
return masked_select_out_cuda_impl(result, self, mask);
}
template <typename scalar_t, typename index_t, typename func_t>
void cuda_take_put_kernel(
TensorIterator& iter,
const Tensor& indexed,
const func_t& f) {
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f);
}
return;
}
const auto numel = indexed.numel();
const bool is_contiguous = indexed.is_contiguous();
char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
const auto offset_calc = make_offset_calculator<2>(iter);
using uindex_t = std::make_unsigned_t<index_t>;
// OffsetCalculator needs the sizes and strides reveresed
const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend());
const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend());
const auto* indexed_strides_data = indexed_strides.data();
const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(),
indexed_sizes.data(),
&indexed_strides_data);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]);
const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds");
index_t offset = static_cast<index_t>(idx);
if (offset < 0) {
offset += numel;
}
if (!is_contiguous) {
offset = offset_indexed.get(offset)[0];
}
f(iterated, offset);
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
void put_kernel(TensorIterator& iter, const Tensor& output, const bool accumulate) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] {
// Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd`
AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long,
"put_cuda_index", [&] {
auto* __restrict__ indexed_ptr = output.template data_ptr<scalar_t>();
if (accumulate) {
index_t numel = output.numel();
cuda_take_put_kernel<scalar_t, index_t>(iter, output,
[numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated);
});
}
else {
cuda_take_put_kernel<scalar_t, index_t>(iter, output,
[indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
indexed_ptr[offset] = iterated;
});
}
});
});
}
void take_kernel(
TensorIterator& iter,
const Tensor& input) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] {
// Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented
AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? ScalarType::Int : ScalarType::Long,
"take_cuda_index", [&] {
const auto* __restrict__ indexed_ptr = input.template data_ptr<scalar_t>();
cuda_take_put_kernel<scalar_t, index_t>(iter, input,
[indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
iterated = indexed_ptr[offset];
});
});
});
}
namespace {
__global__ void masked_scatter_size_check(int64_t *totalElements, int64_t srcSize) {
CUDA_KERNEL_ASSERT(*totalElements <= srcSize);
}
template <typename mask_t>
void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){
auto srcSize = source.numel();
if (self.numel() == 0) {
return;
}
auto mask_cont = mask.contiguous();
// Use a prefix sum to determine the output locations of the masked elements
auto maskPrefixSum = at::empty_like(mask_cont, mask.options().dtype(kLong));
at::cuda::cub::exclusive_scan(
mask_cont.data_ptr<mask_t>(), maskPrefixSum.data_ptr<int64_t>(),
[]__device__(int64_t a, int64_t b) { return a + b; }, int64_t(0),
mask_cont.numel());
// Determine our output size
auto totalElements = (at::_unsafe_view(maskPrefixSum, -1)[-1] + at::_unsafe_view(mask_cont, -1)[-1]);
// Asynchronously check that the number of `1` elements present in the mask
// must be <= the number of elements available in `src`.
hipLaunchKernelGGL(( masked_scatter_size_check), dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
totalElements.data_ptr<int64_t>(), srcSize);
C10_HIP_KERNEL_LAUNCH_CHECK();
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
auto source_contig = source.contiguous();
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(mask_cont)
.add_input(maskPrefixSum)
.build();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Bool,
ScalarType::BFloat16,
ScalarType::Half,
self.scalar_type(),
"masked_scatter_",
[&]() {
auto source_ptr = source_contig.data_ptr<scalar_t>();
gpu_kernel(
iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t {
if (mask) {
return source_ptr[maskPrefixSum];
}
return a;
});
hipGetLastError();
});
}
} // anonymous namespace
Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) {
at::assert_no_internal_overlap(self);
TORCH_CHECK(
self.scalar_type() == source.scalar_type(),
"masked_scatter: expected self and source to have same dtypes but got",
self.scalar_type(),
" and ",
source.scalar_type());
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_scatter_");
if (b_mask->dtype() == ScalarType::Byte) {
TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
}
auto mask_dtype = b_mask->scalar_type();
if (mask_dtype == ScalarType::Bool) {
masked_scatter_cuda_impl<bool>(self, *b_mask, source);
} else {
masked_scatter_cuda_impl<uint8_t>(self, *b_mask, source);
}
return self;
}
template <typename scalar_t>
void flip_kernel_impl(TensorIterator& iter) {
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
flip_kernel_impl<scalar_t>(sub_iter);
}
return;
}
char* const __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1));
const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter);
auto loop = [=]C10_DEVICE(const int i) {
const auto offsets = offset_calc.get(i);
// offsets can be negative here, but it's fine
scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]);
const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]);
*out_data = *in_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
void flip_kernel(TensorIterator& iter, const bool quantized) {
if (quantized) {
AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda",
[&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
flip_kernel_impl<dtype>(iter);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "flip_cuda",
[&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
flip_kernel_impl<dtype>(iter);
});
}
}
REGISTER_DISPATCH(index_stub, &index_kernel);
REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel);
REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel);
REGISTER_DISPATCH(index_put_stub, &index_put_kernel);
REGISTER_DISPATCH(put_stub, &put_kernel);
REGISTER_DISPATCH(take_stub, &take_kernel);
REGISTER_DISPATCH(flip_stub, &flip_kernel);
}} // namespace at::native
| 072fd95427ad82bb45b741cddd6b14e8ca13dfe5.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/TensorTransformations.h> // flip
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <c10/util/MaybeOwned.h>
#include <THC/THCTensorInfo.cuh>
namespace at { namespace native {
static constexpr int launch_bound2 = 4;
static constexpr int launch_size_nd = 128;
template<int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, launch_bound2)
__global__ void index_elementwise_kernel(int N, func_t f) {
int tid = threadIdx.x;
int nv = nt * vt;
int idx = nv * blockIdx.x + tid;
#pragma unroll
for (int i = 0; i < vt; i++) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template<int nt, int vt, typename func_t>
static void launch_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::cuda::getCurrentCUDAStream();
index_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename func_t>
void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) {
int num_indices = index_size.size();
AT_ASSERT(num_indices == index_stride.size());
AT_ASSERT(num_indices == iter.ntensors() - 2);
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
gpu_index_kernel(sub_iter, index_size, index_stride, f);
}
return;
}
auto sizes = at::detail::Array<int64_t, MAX_DIMS>(0);
auto strides = at::detail::Array<int64_t, MAX_DIMS>(0);
auto index_ptrs = at::detail::Array<char*, MAX_DIMS>(nullptr);
for (int i = 0; i < num_indices; i++) {
sizes[i] = index_size[i];
strides[i] = index_stride[i];
index_ptrs[i] = (char*)iter.data_ptr(i + 2);
}
char* out_ptr = (char*)iter.data_ptr(0);
char* in_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<3>(iter);
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) {
auto offsets = offset_calc.get(idx);
char* out_data = out_ptr + offsets[0];
char* in_data = in_ptr + offsets[1];
int64_t offset = 0;
#pragma unroll
for (int i = 0; i < num_indices; i++) {
int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]);
CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds");
if (index < 0) {
index += sizes[i];
}
offset += index * strides[i];
}
f(out_data, in_data, offset);
});
}
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
template <typename scalar_t>
void index_fill_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
scalar_t fill_val) {
if (0 == iter.numel()) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds");
if (idx < 0) {
idx += self_dim_size;
}
self_data[idx * self_dim_stride] = fill_val;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_copy_kernel_impl(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride);
}
return;
}
char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]);
auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
auto* __restrict__ source_data = reinterpret_cast<scalar_t*>(source_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds");
self_data[idx * self_dim_stride] = *source_data;
};
launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
template <typename scalar_t>
void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)out_data = *(scalar_t*)(in_data + offset);
});
}
template <typename scalar_t>
void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) {
*(scalar_t*)(out_data + offset) = *(scalar_t*)in_data;
});
}
static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static void index_fill_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride,
const Scalar& source) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_fill_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto fill_val = source.to<scalar_t>();
auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val);
index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque);
});
}
static void index_copy_kernel(
TensorIterator& iter,
int64_t dim,
int64_t self_dim_size,
int64_t self_dim_stride) {
// See note [Writing Nondeterministic Operations]
// Nondeterministic when index contains duplicate entries
// this kernel will not be called when torch.use_deterministic_algorithms(True)
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(), "index_copy_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride);
});
}
static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) {
TORCH_CHECK(!accumulate, "index_put does not support accumulate=true");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
index_put_kernel_impl<dtype>(iter, index_size, index_stride);
});
}
static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) {
NoNamesGuard guard;
TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool,
"masked_select: expected BoolTensor or ByteTensor for mask");
TORCH_CHECK(self.scalar_type() == result.scalar_type(),
"masked_select(): self and result must have the same scalar type");
auto mask_temp = (mask.dim() == 0)
? c10::MaybeOwned<Tensor>::owned(mask.unsqueeze(0))
: c10::MaybeOwned<Tensor>::borrowed(mask);
auto self_temp = (self.dim() == 0)
? c10::MaybeOwned<Tensor>::owned(self.unsqueeze(0))
: c10::MaybeOwned<Tensor>::borrowed(self);
// Cannot reassign to mask_temp and self_temp here! if they are
// owning and expand_outplace returns a borrow, the returned borrow
// would dangle.
auto mask_self_expanded = expand_outplace(*mask_temp, *self_temp);
at::native::index_out(result, *std::get<1>(mask_self_expanded), c10::List<c10::optional<at::Tensor>>({*std::get<0>(std::move(mask_self_expanded))}));
return result;
}
Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) {
namedinference::compute_broadcast_outnames(self, mask);
Tensor result = at::empty({0}, self.options());
return masked_select_out_cuda_impl(result, self, mask);
}
Tensor & masked_select_out_cuda(const Tensor & self, const Tensor & mask, Tensor & result) {
namedinference::compute_broadcast_outnames(self, mask);
return masked_select_out_cuda_impl(result, self, mask);
}
// Shared driver for the CUDA take/put kernels. `iter` provides the iterated
// element (operand 0) and the int64 index (operand 1); for each position the
// device functor `f` receives a reference to the iterated element and the
// resolved linear offset into `indexed`.
// - Recurses into 32-bit-indexable sub-iterators when the full iterator is
//   too large for 32-bit indexing.
// - Negative indices are wrapped Python-style; out-of-range indices trap via
//   CUDA_KERNEL_ASSERT inside the device lambda.
template <typename scalar_t, typename index_t, typename func_t>
void cuda_take_put_kernel(
  TensorIterator& iter,
  const Tensor& indexed,
  const func_t& f) {
  if (!iter.can_use_32bit_indexing()) {
    for (auto& sub_iter : iter.with_32bit_indexing()) {
      cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f);
    }
    return;
  }
  const auto numel = indexed.numel();
  const bool is_contiguous = indexed.is_contiguous();
  char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
  char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
  const auto offset_calc = make_offset_calculator<2>(iter);
  using uindex_t = std::make_unsigned_t<index_t>;
  // OffsetCalculator needs the sizes and strides reversed
  const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend());
  const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend());
  const auto* indexed_strides_data = indexed_strides.data();
  // Single-operand calculator that turns a linear index into a strided offset;
  // only consulted when `indexed` is not contiguous.
  const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(),
                                                            indexed_sizes.data(),
                                                            &indexed_strides_data);
  auto loop = [=]C10_DEVICE(int i) {
    auto offsets = offset_calc.get(i);
    auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]);
    const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]);
    CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds");
    index_t offset = static_cast<index_t>(idx);
    if (offset < 0) {
      // Wrap negative indices to count from the end.
      offset += numel;
    }
    if (!is_contiguous) {
      offset = offset_indexed.get(offset)[0];
    }
    f(iterated, offset);
  };
  launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
// put_stub implementation: scatters the iterated values into `output` at the
// flattened positions given by the index operand. With accumulate=true the
// writes are atomic adds; otherwise a plain (last-writer-wins) store is used.
void put_kernel(TensorIterator& iter, const Tensor& output, const bool accumulate) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] {
    // Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd`
    AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long,
      "put_cuda_index", [&] {
        auto* __restrict__ indexed_ptr = output.template data_ptr<scalar_t>();
        if (accumulate) {
          index_t numel = output.numel();
          cuda_take_put_kernel<scalar_t, index_t>(iter, output,
              [numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
                fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated);
              });
        }
        else {
          cuda_take_put_kernel<scalar_t, index_t>(iter, output,
              [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
                indexed_ptr[offset] = iterated;
              });
        }
    });
  });
}
// take_stub implementation: gathers elements of `input` at the flattened
// positions given by the index operand and stores them into the iterated
// output element.
void take_kernel(
  TensorIterator& iter,
  const Tensor& input) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] {
    // Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented
    AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? ScalarType::Int : ScalarType::Long,
      "take_cuda_index", [&] {
        const auto* __restrict__ indexed_ptr = input.template data_ptr<scalar_t>();
        cuda_take_put_kernel<scalar_t, index_t>(iter, input,
            [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) {
              iterated = indexed_ptr[offset];
            });
    });
  });
}
namespace {
// Device-side sanity check run with a single thread: traps if the mask selects
// more elements than `source` provides. Runs asynchronously on the current
// stream so the host never has to synchronize on the element count.
__global__ void masked_scatter_size_check(int64_t *totalElements, int64_t srcSize) {
  CUDA_KERNEL_ASSERT(*totalElements <= srcSize);
}
// Copies consecutive elements of `source` into the positions of `self` where
// `mask` is true. mask_t is bool or uint8_t depending on the mask dtype.
template <typename mask_t>
void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){
  auto srcSize = source.numel();
  if (self.numel() == 0) {
    return;
  }
  auto mask_cont = mask.contiguous();
  // Use a prefix sum to determine the output locations of the masked elements
  auto maskPrefixSum = at::empty_like(mask_cont, mask.options().dtype(kLong));
  at::cuda::cub::exclusive_scan(
      mask_cont.data_ptr<mask_t>(), maskPrefixSum.data_ptr<int64_t>(),
      []__device__(int64_t a, int64_t b) { return a + b; }, int64_t(0),
      mask_cont.numel());
  // Determine our output size: exclusive-scan last entry plus the last mask
  // value equals the total number of selected elements.
  auto totalElements = (at::_unsafe_view(maskPrefixSum, -1)[-1] + at::_unsafe_view(mask_cont, -1)[-1]);
  // Asynchronously check that the number of `1` elements present in the mask
  // must be <= the number of elements available in `src`.
  masked_scatter_size_check<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
      totalElements.data_ptr<int64_t>(), srcSize);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
  // We are getting elements from `src` based on an offset from
  // `maskPrefixSum`, so that should be made contiguous too
  auto source_contig = source.contiguous();
  auto iter = TensorIteratorConfig()
      .set_check_mem_overlap(false)
      .check_all_same_dtype(false)
      .resize_outputs(false)
      .add_output(self)
      .add_input(self)
      .add_input(mask_cont)
      .add_input(maskPrefixSum)
      .build();
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      ScalarType::Bool,
      ScalarType::BFloat16,
      ScalarType::Half,
      self.scalar_type(),
      "masked_scatter_",
      [&]() {
        auto source_ptr = source_contig.data_ptr<scalar_t>();
        gpu_kernel(
            iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t {
              if (mask) {
                return source_ptr[maskPrefixSum];
              }
              return a;
            });
        // NOTE(review): the return value of cudaGetLastError() is discarded
        // here, which silently clears any pending launch error — consider
        // C10_CUDA_KERNEL_LAUNCH_CHECK(); verify against upstream intent.
        cudaGetLastError();
      });
}
} // anonymous namespace
// In-place masked_scatter_: copies elements of `source` into `self` at the
// positions where `mask` is true, dispatching on the mask dtype (bool, or the
// deprecated uint8).
Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) {
  at::assert_no_internal_overlap(self);
  TORCH_CHECK(
      self.scalar_type() == source.scalar_type(),
      "masked_scatter: expected self and source to have same dtypes but got",
      self.scalar_type(),
      " and ",
      source.scalar_type());
  c10::MaybeOwned<Tensor> mask_b = expand_inplace(self, mask, "masked_scatter_");
  const auto mask_dtype = mask_b->scalar_type();
  if (mask_dtype == ScalarType::Byte) {
    TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated,"
               "please use a mask with dtype torch.bool instead.");
  }
  if (mask_dtype == ScalarType::Bool) {
    masked_scatter_cuda_impl<bool>(self, *mask_b, source);
  } else {
    masked_scatter_cuda_impl<uint8_t>(self, *mask_b, source);
  }
  return self;
}
// Element-copy kernel behind flip. The iterator is presumably configured by
// the caller with strides that realize the reversal (TODO confirm at the call
// site); this kernel itself only copies operand 1 into operand 0. Offsets are
// computed with signed strides because flipping legitimately produces
// negative strides.
template <typename scalar_t>
void flip_kernel_impl(TensorIterator& iter) {
  if (!iter.can_use_32bit_indexing()) {
    for (auto& sub_iter : iter.with_32bit_indexing()) {
      flip_kernel_impl<scalar_t>(sub_iter);
    }
    return;
  }
  char* const __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
  const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1));
  const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter);
  auto loop = [=]C10_DEVICE(const int i) {
    const auto offsets = offset_calc.get(i);
    // offsets can be negative here, but it's fine
    scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]);
    const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]);
    *out_data = *in_data;
  };
  launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop);
}
// flip_stub entry point: chooses the dispatch-macro family based on whether
// the tensor is quantized, then runs the copy kernel on an opaque type of the
// matching element size (flip only moves bytes, it never inspects values).
void flip_kernel(TensorIterator& iter, const bool quantized) {
  if (!quantized) {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
                                           iter.dtype(), "flip_cuda",
        [&] {
          using dtype = OpaqueType<sizeof(scalar_t)>;
          flip_kernel_impl<dtype>(iter);
        });
  } else {
    AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda",
        [&] {
          using dtype = OpaqueType<sizeof(scalar_t)>;
          flip_kernel_impl<dtype>(iter);
        });
  }
}
REGISTER_DISPATCH(index_stub, &index_kernel);
REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel);
REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel);
REGISTER_DISPATCH(index_put_stub, &index_put_kernel);
REGISTER_DISPATCH(put_stub, &put_kernel);
REGISTER_DISPATCH(take_stub, &take_kernel);
REGISTER_DISPATCH(flip_stub, &flip_kernel);
}} // namespace at::native
|
1a6b686be9f4266f1cb3ec5dc33bd97c750ef7e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* radixSort.cu
* Author: Marius Rejdak
*/
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include "utils.h"
#include "cuda_utils.h"
/*
 * Prefix-sum kernel specialized for the radix-sort passes: per block, it
 * computes an exclusive scan of the predicate "selected key bit is zero"
 * and emits the block total for a later cross-block correction.
 */
__global__ static void CUDA_RadixPrefixSum(
    Element* __restrict__ values,
    int32_t* __restrict__ values_masks,
    int32_t* __restrict__ aux,
    const Key mask) // Bit selected for the current sorting pass
{
    const int32_t idx = (TDIM * BID + TID) << 1;
    // Each thread processes 2 consecutive elements at once
    const int32_t tmp_in0 = (values[idx].k & mask) ? 0 : 1;
    const int32_t tmp_in1 = (values[idx + 1].k & mask) ? 0 : 1;
    // Shared memory holds one pair-sum per thread
    extern __shared__ int32_t shared_int32[];
    shared_int32[TID] = tmp_in0 + tmp_in1;
    __syncthreads();
    // Up-sweep: build partial sums forward through the tree
    for (int32_t i = 1; i < TDIM; i <<= 1) {
        const int32_t x = (i<<1)-1;
        if (TID >= i && (TID & x) == x) {
            shared_int32[TID] += shared_int32[TID - i];
        }
        __syncthreads();
    }
    // Clear the root so the scan becomes exclusive
    if (TID == 0)
        shared_int32[TDIM - 1] = 0;
    __syncthreads();
    // Down-sweep: propagate the sums back down the tree
    for (int32_t i = TDIM>>1; i >= 1; i >>= 1) {
        int32_t x = (i<<1)-1;
        if (TID >= i && (TID & x) == x) {
            int32_t temp = shared_int32[TID - i];
            shared_int32[TID - i] = shared_int32[TID];
            shared_int32[TID] += temp;
        }
        __syncthreads();
    }
    // Write the per-element exclusive prefix sums
    values_masks[idx] = shared_int32[TID];
    values_masks[idx + 1] = shared_int32[TID] + tmp_in0;
    // Last thread stores the block total, used to correct subsequent blocks
    if (TID == TDIM-1)
        aux[BID] = tmp_in0 + shared_int32[TID] + tmp_in1;
}
/*
 * Scatter pass: moves each element to its destination for the current bit,
 * using the exclusive prefix sums computed by CUDA_RadixPrefixSum.
 * Zero-bit elements go to the front at their prefix-sum rank; one-bit
 * elements are placed after all zero-bit elements.
 */
__global__ static void CUDA_RadixSort(
    Element* __restrict__ values,
    Element* __restrict__ values_sorted,
    int32_t* __restrict__ values_masks_psum,
    const Key mask)
{
    const int32_t idx = TDIM * BID + TID;
    const int32_t bdim = TDIM * BDIM;
    const Element current = values[idx];
    const int32_t new_idx = values_masks_psum[idx];
    if (current.k & mask)
        // Total zero-bit count = last prefix sum + last element's own
        // predicate; one-bit elements land after that boundary, offset by
        // their position among the one-bit elements (idx - new_idx).
        values_sorted[idx + (values_masks_psum[bdim-1]
                             + ((values[bdim-1].k & mask) ? 0 : 1))
                      - new_idx] = current;
    else
        values_sorted[new_idx] = current;
}
/*
 * Host wrapper for the prefix-sum kernel: runs the per-block scan, then (for
 * multi-block grids) scans the per-block totals and adds them back so the
 * prefix sums become global.
 */
__host__ void RadixPrefixSum(Element* d_mem_values, int32_t* d_mem_masks,
                             const int32_t N, const Key mask)
{
    int32_t *d_mem_aux;
    kdim v = get_kdim(N);
    // Temporary buffer holding one total per block
    gpuErrchk( hipMalloc(&d_mem_aux, v.num_blocks * sizeof(int32_t)) );
    // Prefix sums within individual blocks; the kernel handles two elements
    // per thread, hence the halved thread count.
    hipLaunchKernelGGL(( CUDA_RadixPrefixSum), dim3(v.dim_blocks),
                        dim3(v.num_threads>>1),
                        v.num_threads*sizeof(int32_t), 0,
                        d_mem_values, d_mem_masks, d_mem_aux, mask);
    hipDeviceSynchronize();
    gpuErrchk( hipPeekAtLastError() );
    if (v.num_blocks > 1) {
        // Correct the sums of the subsequent blocks
        SumScan_Inclusive(d_mem_aux, v.num_blocks);
        hipLaunchKernelGGL(( CUDA_SumScanUpdate), dim3(v.dim_blocks), dim3(v.num_threads), 0, 0,
                            d_mem_masks, d_mem_aux);
        hipDeviceSynchronize();
        gpuErrchk( hipPeekAtLastError() );
    }
    hipFree(d_mem_aux);
}
/*
 * Host driver for LSD radix sort: one prefix-sum + scatter pass per key bit,
 * ping-ponging between the two device buffers. After the final pass (bit
 * count is even for power-of-two key widths) the result ends up in
 * d_mem_sorted.
 */
__host__ void inline RadixSort(Element* d_mem_values,
                               Element* d_mem_sorted,
                               const int32_t N)
{
    Element *d_v, *d_s;
    int32_t *d_m;
    kdim v = get_kdim(N);
    // Temporary buffer for the per-element prefix-sum results
    gpuErrchk( hipMalloc(&d_m, N * sizeof(int32_t)) );
    // LSB-first pass over every bit of the key
    for (int16_t bit = 0; bit < sizeof(Key)*8; ++bit) {
        // Shift in the key's own width: a plain `1 << bit` is an int shift,
        // which is undefined behavior for bit >= 31 and truncates when Key is
        // wider than int.
        const Key mask = Key(1) << bit;
        if (bit % 2) {
            d_v = d_mem_values;
            d_s = d_mem_sorted;
        } else {
            d_v = d_mem_sorted;
            d_s = d_mem_values;
        }
        RadixPrefixSum(d_v, d_m, N, mask);
        hipLaunchKernelGGL(( CUDA_RadixSort), dim3(v.dim_blocks), dim3(v.num_threads), 0, 0,
                            d_v, d_s, d_m, mask);
        hipDeviceSynchronize();
        gpuErrchk( hipPeekAtLastError() );
    }
    hipFree(d_m);
}
// Benchmark driver: for each buffer size, runs NUM_PASSES sort passes over
// freshly randomized data, times only the device-side sort, verifies the
// result on the host, and prints "<elements>,<avg clock ticks>" per size.
int main(int argc, char** argv)
{
    void *h_mem, *d_mem_values, *d_mem_sorted;
    h_mem = malloc(MAX_SIZE);
    assert(h_mem != NULL);
    gpuErrchk( hipMalloc(&d_mem_values, MAX_SIZE) );
    gpuErrchk( hipMalloc(&d_mem_sorted, MAX_SIZE) );
    srand(time(NULL));
    printf("Radix sort\n");
    // CLOCKS_PER_SEC is clock_t and sizeof() yields size_t; neither is
    // guaranteed to be long, so the original "%ld" for both was undefined
    // behavior on some platforms — cast/match the specifiers instead.
    printf("%s,%s,%ld,%zu\n", "size", "time", (long) CLOCKS_PER_SEC, sizeof(Element));
    for(int32_t size = MIN_SIZE; size <= MAX_SIZE; size <<= 1) {
        int32_t N = size/sizeof(Element);
        clock_t t1, t2, t_sum = 0;
        for (int i = 0; i < NUM_PASSES; ++i) {
            init_values((Element*) h_mem, N);
            copy_to_device_time(d_mem_values, h_mem, size);
            hipDeviceSynchronize();
            t1 = clock();
            RadixSort((Element*) d_mem_values, (Element*) d_mem_sorted, N);
            hipDeviceSynchronize();
            t2 = clock();
            t_sum += t2 - t1;
            gpuErrchk( hipPeekAtLastError() );
            copy_to_host_time(h_mem, d_mem_sorted, size);
            hipDeviceSynchronize();
            assert(is_int_array_sorted((Element*) h_mem, N, false));
        }
        t_sum /= NUM_PASSES;
        // N is int32_t -> "%d"; t_sum is clock_t -> cast to long for "%ld"
        // (the original "%ld,%ld" mismatched both arguments).
        printf("%d,%ld\n", N, (long) t_sum);
    }
    hipFree(d_mem_values);
    hipFree(d_mem_sorted);
    free(h_mem);
    return 0;
}
| 1a6b686be9f4266f1cb3ec5dc33bd97c750ef7e8.cu | /*
* radixSort.cu
* Author: Marius Rejdak
*/
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include "utils.h"
#include "cuda_utils.h"
/*
 * Prefix-sum kernel specialized for the radix-sort passes: per block, it
 * computes an exclusive scan of the predicate "selected key bit is zero"
 * and emits the block total for a later cross-block correction.
 */
__global__ static void CUDA_RadixPrefixSum(
    Element* __restrict__ values,
    int32_t* __restrict__ values_masks,
    int32_t* __restrict__ aux,
    const Key mask) // Bit selected for the current sorting pass
{
    const int32_t idx = (TDIM * BID + TID) << 1;
    // Each thread processes 2 consecutive elements at once
    const int32_t tmp_in0 = (values[idx].k & mask) ? 0 : 1;
    const int32_t tmp_in1 = (values[idx + 1].k & mask) ? 0 : 1;
    // Shared memory holds one pair-sum per thread
    extern __shared__ int32_t shared_int32[];
    shared_int32[TID] = tmp_in0 + tmp_in1;
    __syncthreads();
    // Up-sweep: build partial sums forward through the tree
    for (int32_t i = 1; i < TDIM; i <<= 1) {
        const int32_t x = (i<<1)-1;
        if (TID >= i && (TID & x) == x) {
            shared_int32[TID] += shared_int32[TID - i];
        }
        __syncthreads();
    }
    // Clear the root so the scan becomes exclusive
    if (TID == 0)
        shared_int32[TDIM - 1] = 0;
    __syncthreads();
    // Down-sweep: propagate the sums back down the tree
    for (int32_t i = TDIM>>1; i >= 1; i >>= 1) {
        int32_t x = (i<<1)-1;
        if (TID >= i && (TID & x) == x) {
            int32_t temp = shared_int32[TID - i];
            shared_int32[TID - i] = shared_int32[TID];
            shared_int32[TID] += temp;
        }
        __syncthreads();
    }
    // Write the per-element exclusive prefix sums
    values_masks[idx] = shared_int32[TID];
    values_masks[idx + 1] = shared_int32[TID] + tmp_in0;
    // Last thread stores the block total, used to correct subsequent blocks
    if (TID == TDIM-1)
        aux[BID] = tmp_in0 + shared_int32[TID] + tmp_in1;
}
/*
 * Scatter pass: moves each element to its destination for the current bit,
 * using the exclusive prefix sums computed by CUDA_RadixPrefixSum.
 * Zero-bit elements go to the front at their prefix-sum rank; one-bit
 * elements are placed after all zero-bit elements.
 */
__global__ static void CUDA_RadixSort(
    Element* __restrict__ values,
    Element* __restrict__ values_sorted,
    int32_t* __restrict__ values_masks_psum,
    const Key mask)
{
    const int32_t idx = TDIM * BID + TID;
    const int32_t bdim = TDIM * BDIM;
    const Element current = values[idx];
    const int32_t new_idx = values_masks_psum[idx];
    if (current.k & mask)
        // Total zero-bit count = last prefix sum + last element's own
        // predicate; one-bit elements land after that boundary, offset by
        // their position among the one-bit elements (idx - new_idx).
        values_sorted[idx + (values_masks_psum[bdim-1]
                             + ((values[bdim-1].k & mask) ? 0 : 1))
                      - new_idx] = current;
    else
        values_sorted[new_idx] = current;
}
/*
 * Host wrapper for the prefix-sum kernel: runs the per-block scan, then (for
 * multi-block grids) scans the per-block totals and adds them back so the
 * prefix sums become global.
 */
__host__ void RadixPrefixSum(Element* d_mem_values, int32_t* d_mem_masks,
                             const int32_t N, const Key mask)
{
    int32_t *d_mem_aux;
    kdim v = get_kdim(N);
    // Temporary buffer holding one total per block
    gpuErrchk( cudaMalloc(&d_mem_aux, v.num_blocks * sizeof(int32_t)) );
    // Prefix sums within individual blocks; the kernel handles two elements
    // per thread, hence the halved thread count.
    CUDA_RadixPrefixSum<<<v.dim_blocks,
                          v.num_threads>>1,
                          v.num_threads*sizeof(int32_t)>>>
                       (d_mem_values, d_mem_masks, d_mem_aux, mask);
    cudaDeviceSynchronize();
    gpuErrchk( cudaPeekAtLastError() );
    if (v.num_blocks > 1) {
        // Correct the sums of the subsequent blocks
        SumScan_Inclusive(d_mem_aux, v.num_blocks);
        CUDA_SumScanUpdate<<<v.dim_blocks, v.num_threads>>>
                          (d_mem_masks, d_mem_aux);
        cudaDeviceSynchronize();
        gpuErrchk( cudaPeekAtLastError() );
    }
    cudaFree(d_mem_aux);
}
/*
 * Host driver for LSD radix sort: one prefix-sum + scatter pass per key bit,
 * ping-ponging between the two device buffers. After the final pass (bit
 * count is even for power-of-two key widths) the result ends up in
 * d_mem_sorted.
 */
__host__ void inline RadixSort(Element* d_mem_values,
                               Element* d_mem_sorted,
                               const int32_t N)
{
    Element *d_v, *d_s;
    int32_t *d_m;
    kdim v = get_kdim(N);
    // Temporary buffer for the per-element prefix-sum results
    gpuErrchk( cudaMalloc(&d_m, N * sizeof(int32_t)) );
    // LSB-first pass over every bit of the key
    for (int16_t bit = 0; bit < sizeof(Key)*8; ++bit) {
        // Shift in the key's own width: a plain `1 << bit` is an int shift,
        // which is undefined behavior for bit >= 31 and truncates when Key is
        // wider than int.
        const Key mask = Key(1) << bit;
        if (bit % 2) {
            d_v = d_mem_values;
            d_s = d_mem_sorted;
        } else {
            d_v = d_mem_sorted;
            d_s = d_mem_values;
        }
        RadixPrefixSum(d_v, d_m, N, mask);
        CUDA_RadixSort<<<v.dim_blocks, v.num_threads>>>
                      (d_v, d_s, d_m, mask);
        cudaDeviceSynchronize();
        gpuErrchk( cudaPeekAtLastError() );
    }
    cudaFree(d_m);
}
// Benchmark driver: for each buffer size, runs NUM_PASSES sort passes over
// freshly randomized data, times only the device-side sort, verifies the
// result on the host, and prints "<elements>,<avg clock ticks>" per size.
int main(int argc, char** argv)
{
    void *h_mem, *d_mem_values, *d_mem_sorted;
    h_mem = malloc(MAX_SIZE);
    assert(h_mem != NULL);
    gpuErrchk( cudaMalloc(&d_mem_values, MAX_SIZE) );
    gpuErrchk( cudaMalloc(&d_mem_sorted, MAX_SIZE) );
    srand(time(NULL));
    printf("Radix sort\n");
    // CLOCKS_PER_SEC is clock_t and sizeof() yields size_t; neither is
    // guaranteed to be long, so the original "%ld" for both was undefined
    // behavior on some platforms — cast/match the specifiers instead.
    printf("%s,%s,%ld,%zu\n", "size", "time", (long) CLOCKS_PER_SEC, sizeof(Element));
    for(int32_t size = MIN_SIZE; size <= MAX_SIZE; size <<= 1) {
        int32_t N = size/sizeof(Element);
        clock_t t1, t2, t_sum = 0;
        for (int i = 0; i < NUM_PASSES; ++i) {
            init_values((Element*) h_mem, N);
            copy_to_device_time(d_mem_values, h_mem, size);
            cudaDeviceSynchronize();
            t1 = clock();
            RadixSort((Element*) d_mem_values, (Element*) d_mem_sorted, N);
            cudaDeviceSynchronize();
            t2 = clock();
            t_sum += t2 - t1;
            gpuErrchk( cudaPeekAtLastError() );
            copy_to_host_time(h_mem, d_mem_sorted, size);
            cudaDeviceSynchronize();
            assert(is_int_array_sorted((Element*) h_mem, N, false));
        }
        t_sum /= NUM_PASSES;
        // N is int32_t -> "%d"; t_sum is clock_t -> cast to long for "%ld"
        // (the original "%ld,%ld" mismatched both arguments).
        printf("%d,%ld\n", N, (long) t_sum);
    }
    cudaFree(d_mem_values);
    cudaFree(d_mem_sorted);
    free(h_mem);
    return 0;
}
|
e39ffb39e5f4872d659de7f8c050a3b7763f829a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "vwc_process.cuh"
#include "../common/user_specified_device_functions.cuh"
// Virtual Warp-Centric (VWC) manner of processing graph using Compressed Sparse Row (CSR) representation format.
// Each virtual warp of VWSize lanes (VWMask = log2(VWSize)) owns one vertex:
// lane 0 initializes the vertex state and edge range, all lanes then walk the
// vertex's neighbor list cooperatively, and the per-lane partial results are
// combined in a fully unrolled shared-memory reduction.
// Assumes blockDim.x == VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE (the shared
// arrays are sized by that constant and indexed with threadIdx.x).
// NOTE(review): lanes read edges_starting_address/ngbrs_size written by
// lane 0 without a barrier — this relies on intra-warp lockstep via volatile
// shared memory; on Volta+ independent thread scheduling this may require
// __syncwarp(). Verify against the target architecture.
template < uint VWSize, uint VWMask >
__global__ void VWC_CSR_GPU_kernel(
    const uint num_of_vertices,
    const uint* edges_indices,
    const uint* vertices_indices,
    Vertex* VertexValue,
    Edge* EdgeValue,
    Vertex_static* VertexValue_static,
    int* dev_finished ) {
  // One slot per virtual warp for the reduced vertex value and edge range.
  __shared__ Vertex final_vertex_values[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >> VWMask ];
  __shared__ Vertex thread_outcome[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE ];
  volatile __shared__ uint edges_starting_address[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >> VWMask ];
  volatile __shared__ uint ngbrs_size[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >> VWMask ];
  Vertex previous_vertex_value;
  // You might gain some performance if you limit maximum number of registers per thread with -maxrregcount flag. For example, specifying 32 for the Kepler architecture.
  const uint warp_in_block_offset = threadIdx.x >> VWMask;  // virtual-warp index within the block
  const uint VLane_id = threadIdx.x & (VWSize-1);           // lane index within the virtual warp
  const uint t_id = threadIdx.x + blockIdx.x * blockDim.x;
  const uint VW_id = t_id >> VWMask;                        // global virtual-warp id == vertex id
  if( VW_id >= num_of_vertices )
    return;
  previous_vertex_value = VertexValue[ VW_id ];
  // Only one virtual lane in the virtual warp does vertex initialization.
  if( VLane_id == 0 ) {
    edges_starting_address[ warp_in_block_offset ] = vertices_indices[ VW_id ];
    ngbrs_size[ warp_in_block_offset ] = vertices_indices[ VW_id + 1 ] - edges_starting_address[ warp_in_block_offset ] ;
    init_compute( final_vertex_values + warp_in_block_offset, &previous_vertex_value );
  }
  // Lanes stride over the vertex's neighbor range VWSize edges at a time.
  for( uint index = VLane_id; index < ngbrs_size[ warp_in_block_offset ]; index += VWSize ) {
    uint target_edge = edges_starting_address[ warp_in_block_offset ] + index;
    uint target_vertex = edges_indices[ target_edge ];
    compute_local(
        VertexValue[target_vertex],
        VertexValue_static + target_vertex,
        EdgeValue + target_edge,
        thread_outcome + threadIdx.x,
        final_vertex_values + warp_in_block_offset );
    // Parallel Reduction. Totally unrolled. Each step only combines lanes
    // whose partner still holds a valid partial result for this stride.
    if( VWSize == 32 )
      if( VLane_id < 16 )
        if( (index + 16) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 16 );
    if( VWSize >= 16 )
      if( VLane_id < 8 )
        if( (index + 8) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 8 );
    if( VWSize >= 8 )
      if( VLane_id < 4 )
        if( (index + 4) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 4 );
    if( VWSize >= 4 )
      if( VLane_id < 2 )
        if( (index + 2) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 2 );
    if( VWSize >= 2 )
      if( VLane_id < 1 ) {
        if( (index + 1) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 1 );
        compute_reduce( final_vertex_values + warp_in_block_offset, thread_outcome + threadIdx.x ); // Virtual lane 0 saves the final value of current iteration.
      }
  }
  // Lane 0 publishes the result and raises the not-finished flag only when
  // the vertex value actually changed this iteration.
  if( VLane_id == 0 )
    if( update_condition ( final_vertex_values + warp_in_block_offset, &previous_vertex_value ) ) {
      (*dev_finished) = 1;
      VertexValue[ VW_id ] = final_vertex_values[ warp_in_block_offset ];
    }
}
// Host dispatcher: maps the runtime virtual-warp size (32/16/8/4/2) onto the
// matching compile-time template instantiation and launches the kernel.
// Unlisted vwSize values fall through without launching anything.
// NOTE(review): the host parameters named (vertexIndices, edgesIndices) are
// passed to kernel parameters (edges_indices, vertices_indices) in that
// order — the naming appears swapped relative to the kernel signature;
// verify against the call sites.
void vwc_process(
    int vwSize,
    uint gridDimen,
    const uint nVertices,
    const uint* vertexIndices,
    const uint* edgesIndices,
    Vertex* VertexValue,
    Edge* EdgeValue,
    Vertex_static* VertexValueStatic,
    int* finished ) {
  switch( vwSize ) {
  case(32):
    hipLaunchKernelGGL(( VWC_CSR_GPU_kernel< 32, 5 >)
        , dim3(gridDimen), dim3(VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE) , 0, 0,
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(16):
    hipLaunchKernelGGL(( VWC_CSR_GPU_kernel< 16, 4 >)
        , dim3(gridDimen), dim3(VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE) , 0, 0,
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(8):
    hipLaunchKernelGGL(( VWC_CSR_GPU_kernel< 8, 3 >)
        , dim3(gridDimen), dim3(VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE) , 0, 0,
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(4):
    hipLaunchKernelGGL(( VWC_CSR_GPU_kernel< 4, 2 >)
        , dim3(gridDimen), dim3(VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE) , 0, 0,
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(2):
    hipLaunchKernelGGL(( VWC_CSR_GPU_kernel< 2, 1 >)
        , dim3(gridDimen), dim3(VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE) , 0, 0,
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  }
}
| e39ffb39e5f4872d659de7f8c050a3b7763f829a.cu |
#include "vwc_process.cuh"
#include "../common/user_specified_device_functions.cuh"
// Virtual Warp-Centric (VWC) manner of processing graph using Compressed Sparse Row (CSR) representation format.
// Each virtual warp of VWSize lanes (VWMask = log2(VWSize)) owns one vertex:
// lane 0 initializes the vertex state and edge range, all lanes then walk the
// vertex's neighbor list cooperatively, and the per-lane partial results are
// combined in a fully unrolled shared-memory reduction.
// Assumes blockDim.x == VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE (the shared
// arrays are sized by that constant and indexed with threadIdx.x).
// NOTE(review): lanes read edges_starting_address/ngbrs_size written by
// lane 0 without a barrier — this relies on intra-warp lockstep via volatile
// shared memory; on Volta+ independent thread scheduling this may require
// __syncwarp(). Verify against the target architecture.
template < uint VWSize, uint VWMask >
__global__ void VWC_CSR_GPU_kernel(
    const uint num_of_vertices,
    const uint* edges_indices,
    const uint* vertices_indices,
    Vertex* VertexValue,
    Edge* EdgeValue,
    Vertex_static* VertexValue_static,
    int* dev_finished ) {
  // One slot per virtual warp for the reduced vertex value and edge range.
  __shared__ Vertex final_vertex_values[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >> VWMask ];
  __shared__ Vertex thread_outcome[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE ];
  volatile __shared__ uint edges_starting_address[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >> VWMask ];
  volatile __shared__ uint ngbrs_size[ VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >> VWMask ];
  Vertex previous_vertex_value;
  // You might gain some performance if you limit maximum number of registers per thread with -maxrregcount flag. For example, specifying 32 for the Kepler architecture.
  const uint warp_in_block_offset = threadIdx.x >> VWMask;  // virtual-warp index within the block
  const uint VLane_id = threadIdx.x & (VWSize-1);           // lane index within the virtual warp
  const uint t_id = threadIdx.x + blockIdx.x * blockDim.x;
  const uint VW_id = t_id >> VWMask;                        // global virtual-warp id == vertex id
  if( VW_id >= num_of_vertices )
    return;
  previous_vertex_value = VertexValue[ VW_id ];
  // Only one virtual lane in the virtual warp does vertex initialization.
  if( VLane_id == 0 ) {
    edges_starting_address[ warp_in_block_offset ] = vertices_indices[ VW_id ];
    ngbrs_size[ warp_in_block_offset ] = vertices_indices[ VW_id + 1 ] - edges_starting_address[ warp_in_block_offset ] ;
    init_compute( final_vertex_values + warp_in_block_offset, &previous_vertex_value );
  }
  // Lanes stride over the vertex's neighbor range VWSize edges at a time.
  for( uint index = VLane_id; index < ngbrs_size[ warp_in_block_offset ]; index += VWSize ) {
    uint target_edge = edges_starting_address[ warp_in_block_offset ] + index;
    uint target_vertex = edges_indices[ target_edge ];
    compute_local(
        VertexValue[target_vertex],
        VertexValue_static + target_vertex,
        EdgeValue + target_edge,
        thread_outcome + threadIdx.x,
        final_vertex_values + warp_in_block_offset );
    // Parallel Reduction. Totally unrolled. Each step only combines lanes
    // whose partner still holds a valid partial result for this stride.
    if( VWSize == 32 )
      if( VLane_id < 16 )
        if( (index + 16) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 16 );
    if( VWSize >= 16 )
      if( VLane_id < 8 )
        if( (index + 8) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 8 );
    if( VWSize >= 8 )
      if( VLane_id < 4 )
        if( (index + 4) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 4 );
    if( VWSize >= 4 )
      if( VLane_id < 2 )
        if( (index + 2) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 2 );
    if( VWSize >= 2 )
      if( VLane_id < 1 ) {
        if( (index + 1) < ngbrs_size[ warp_in_block_offset ])
          compute_reduce( thread_outcome + threadIdx.x, thread_outcome + threadIdx.x + 1 );
        compute_reduce( final_vertex_values + warp_in_block_offset, thread_outcome + threadIdx.x ); // Virtual lane 0 saves the final value of current iteration.
      }
  }
  // Lane 0 publishes the result and raises the not-finished flag only when
  // the vertex value actually changed this iteration.
  if( VLane_id == 0 )
    if( update_condition ( final_vertex_values + warp_in_block_offset, &previous_vertex_value ) ) {
      (*dev_finished) = 1;
      VertexValue[ VW_id ] = final_vertex_values[ warp_in_block_offset ];
    }
}
// Host dispatcher: maps the runtime virtual-warp size (32/16/8/4/2) onto the
// matching compile-time template instantiation and launches the kernel.
// Unlisted vwSize values fall through without launching anything.
// NOTE(review): the host parameters named (vertexIndices, edgesIndices) are
// passed to kernel parameters (edges_indices, vertices_indices) in that
// order — the naming appears swapped relative to the kernel signature;
// verify against the call sites.
void vwc_process(
    int vwSize,
    uint gridDimen,
    const uint nVertices,
    const uint* vertexIndices,
    const uint* edgesIndices,
    Vertex* VertexValue,
    Edge* EdgeValue,
    Vertex_static* VertexValueStatic,
    int* finished ) {
  switch( vwSize ) {
  case(32):
    VWC_CSR_GPU_kernel< 32, 5 >
        <<< gridDimen, VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >>> (
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(16):
    VWC_CSR_GPU_kernel< 16, 4 >
        <<< gridDimen, VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >>> (
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(8):
    VWC_CSR_GPU_kernel< 8, 3 >
        <<< gridDimen, VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >>> (
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(4):
    VWC_CSR_GPU_kernel< 4, 2 >
        <<< gridDimen, VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >>> (
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  case(2):
    VWC_CSR_GPU_kernel< 2, 1 >
        <<< gridDimen, VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE >>> (
        nVertices,
        vertexIndices,
        edgesIndices,
        VertexValue,
        EdgeValue,
        VertexValueStatic,
        finished );
    break;
  }
}
|
df81b3bd449cba53dbd2070e18a42aefe605c928.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "countTriangles.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint2 *validPoints = NULL;
hipMalloc(&validPoints, XSIZE*YSIZE);
int *count = NULL;
hipMalloc(&count, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
countTriangles), dim3(gridBlock),dim3(threadBlock), 0, 0, validPoints,count);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
countTriangles), dim3(gridBlock),dim3(threadBlock), 0, 0, validPoints,count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
countTriangles), dim3(gridBlock),dim3(threadBlock), 0, 0, validPoints,count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | df81b3bd449cba53dbd2070e18a42aefe605c928.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "countTriangles.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated launch-configuration sweep: for each matrix size and each of
// 20 block shapes, warms up and then times 1000 launches of countTriangles,
// printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per configuration.
// argv[1] selects how many of the matrix sizes to sweep.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            uint2 *validPoints = NULL;
            // Allocate a full element count: the original passed XSIZE*YSIZE raw
            // bytes, under-allocating the uint2 buffer by a factor of sizeof(uint2).
            cudaMalloc(&validPoints, XSIZE * YSIZE * sizeof(uint2));
            int *count = NULL;
            cudaMalloc(&count, XSIZE * YSIZE * sizeof(int));
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op free, commonly used to force context initialization
            countTriangles<<<gridBlock,threadBlock>>>(validPoints,count);
            cudaDeviceSynchronize();
            // Warm-up launches (excluded from the timed loop).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                countTriangles<<<gridBlock,threadBlock>>>(validPoints,count);
            }
            auto start = steady_clock::now();
            // NOTE(review): there is no device sync before reading the end
            // timestamp, so this measures launch/queueing time, not kernel
            // runtime — preserved as-is from the original harness.
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                countTriangles<<<gridBlock,threadBlock>>>(validPoints,count);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Free the per-configuration buffers (the original leaked both on
            // every iteration of this sweep).
            cudaFree(validPoints);
            cudaFree(count);
        }
    }
}
weighted_softmax_loss_layer.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-element forward kernel: for each (sample, spatial position) pair, looks
// up the ground-truth label and writes the class-weighted negative log
// probability into loss[] (0 for ignored labels). counts[] records which
// positions contributed, for later normalization.
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label, const Dtype* weights,
          Dtype* loss, const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      loss[index] = 0;
      counts[index] = 0;
    } else {
      // Clamp the probability away from zero before taking the log.
      loss[index] = -weights[label_value] * log(max(prob_data[n * dim + label_value * spatial_dim + s],
                      Dtype(FLT_MIN)));
      counts[index] = 1;
    }
  }
}
// Forward pass: runs the internal softmax, then computes the per-position
// class-weighted negative log-likelihood on the GPU and writes the
// normalized sum into top[0].
// bottom[0]: scores, bottom[1]: labels, bottom[2]: per-class loss weights.
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const Dtype* cls_loss_weights = bottom[2]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // (Removed two leftover LOG(INFO) debug statements that printed the
  // addresses of the local `label`/`cls_loss_weights` pointer variables —
  // host stack addresses carry no information and were logged on every
  // forward pass.)
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( WeightedSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, cls_loss_weights,
      loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
      counts);
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
                                                        valid_count);
  if (top.size() == 2) {
    // Optional second output shares the softmax probabilities.
    top[1]->ShareData(prob_);
  }
}
// Backward kernel: turns the softmax probabilities previously copied into
// bottom_diff into the gradient of the weighted softmax loss.
// One thread per (sample n, spatial position s); counts[] records which
// positions contributed (0 for ignored labels, 1 otherwise).
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, const Dtype* weights, Dtype* bottom_diff, const int num,
const int dim, const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
// Ignored position: zero the gradient for every channel.
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// Softmax cross-entropy gradient: prob - one_hot(label), then every
// channel is scaled by the class weight of the true label.
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= weights[label_value];
}
}
}
}
// Backward pass (GPU path). Only bottom[0] (the scores) can receive a
// gradient; requesting gradients for labels (bottom[1]) or class weights
// (bottom[2]) is a fatal configuration error.
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to weight inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
// Seed bottom_diff with the softmax probabilities; the kernel then
// subtracts the one-hot label and applies the class weight in place.
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const Dtype* cls_loss_weights = bottom[2]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we reuse it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( WeightedSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, cls_loss_weights, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale by the upstream gradient divided by the normalizer.
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
| weighted_softmax_loss_layer.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/weighted_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward kernel: per (sample n, spatial position s) weighted cross-entropy
// loss: -weights[label] * log(prob[label]), clamped at FLT_MIN for stability.
// counts[] records which positions contributed (0 for ignored labels).
template <typename Dtype>
__global__ void WeightedSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, const Dtype* weights,
Dtype* loss, const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
// Clamp at FLT_MIN so log() never sees zero.
loss[index] = -weights[label_value] * log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
// Forward pass of the class-weighted softmax cross-entropy loss (GPU path).
// bottom[0]: raw scores, bottom[1]: labels, bottom[2]: per-class loss weights.
// top[0] receives the normalized scalar loss; top[1] (optional) shares the
// softmax probabilities.
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// Run the internal softmax layer to obtain class probabilities in prob_.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const Dtype* cls_loss_weights = bottom[2]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// (Removed two LOG(INFO) debug lines that printed &label / &cls_loss_weights:
// they logged the stack addresses of local host pointers -- not the data --
// on every forward pass.)
// NOLINT_NEXT_LINE(whitespace/operators)
WeightedSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, cls_loss_weights,
loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
counts);
// Reduce the per-position losses to a single scalar.
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
// Backward kernel: turns the softmax probabilities previously copied into
// bottom_diff into the gradient of the weighted softmax loss.
// One thread per (sample n, spatial position s); counts[] records which
// positions contributed (0 for ignored labels, 1 otherwise).
template <typename Dtype>
__global__ void WeightedSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, const Dtype* weights, Dtype* bottom_diff, const int num,
const int dim, const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
// Ignored position: zero the gradient for every channel.
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// Softmax cross-entropy gradient: prob - one_hot(label), then every
// channel is scaled by the class weight of the true label.
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] *= weights[label_value];
}
}
}
}
// Backward pass (GPU path). Only bottom[0] (the scores) can receive a
// gradient; requesting gradients for labels (bottom[1]) or class weights
// (bottom[2]) is a fatal configuration error.
template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to weight inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
// Seed bottom_diff with the softmax probabilities; the kernel then
// subtracts the one-hot label and applies the class weight in place.
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const Dtype* cls_loss_weights = bottom[2]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we reuse it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
WeightedSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, cls_loss_weights, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale by the upstream gradient divided by the normalizer.
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSoftmaxWithLossLayer);
} // namespace caffe
|
066d6e8363dd3051c586f053d6b32444676bfe87.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
// Element-wise absolute difference |x - y|, converted to the accumulation
// type Acctype (presumably consumed by generic/AbsCriterion.cu via thrust --
// see the thrust includes above).
template <typename Dtype, typename Acctype>
struct abs_functor
{
  __host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const
  {
    const Dtype diff = x - y;
    const Dtype magnitude = (diff >= 0) ? diff : -diff;
    return ScalarConvert<Dtype, Acctype>::to(magnitude);
  }
};
// Writes |*x - *y| into *out for a single element (the non-reduced
// AbsCriterion forward pass).
template <typename Dtype>
struct abs_updateOutput_no_reduce_functor
{
  __host__ __device__ void operator()(const Dtype* x, const Dtype* y, Dtype *out)
  {
    const Dtype diff = *x - *y;
    *out = (diff < 0) ? -diff : diff;
  }
};
// Writes d|x - y|/dx = sign(*x - *y) into *gradInput (non-reduced backward
// pass). Note the >= comparison yields +1 when *x == *y.
template <typename Dtype>
struct abs_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *gradInput)
{
*gradInput = ScalarConvert<int, Dtype>::to(*x >= *y ? 1 : -1);
}
};
// Gradient functor for the reduced AbsCriterion:
// sign(x - y) * norm * gradOutput, where norm carries any size averaging.
template <typename Dtype>
struct abs_updateGradInput_functor
{
  const Dtype norm;
  const Dtype gradOutput;
  abs_updateGradInput_functor(Dtype norm_, Dtype gradOutput_)
    : norm(norm_), gradOutput(gradOutput_)
  {}
  __host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const
  {
    // (x - y) >= 0 resolves ties (x == y) toward +norm, matching the sign
    // convention of the non-reduced variant above.
    const Dtype signedNorm = ((x - y) >= 0) ? norm : -norm;
    return signedNorm * gradOutput;
  }
};
#include "generic/AbsCriterion.cu"
#include "THHGenerateFloatTypes.h"
| 066d6e8363dd3051c586f053d6b32444676bfe87.cu | #include "THCUNN.h"
#include "common.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
// Element-wise absolute difference |x - y|, converted to the accumulation
// type Acctype (presumably consumed by generic/AbsCriterion.cu via thrust --
// see the thrust includes above).
template <typename Dtype, typename Acctype>
struct abs_functor
{
__host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const
{
Dtype z = x-y;
return ScalarConvert<Dtype, Acctype>::to(z >= 0 ? z : -z);
}
};
// Writes |*x - *y| into *out for a single element (the non-reduced
// AbsCriterion forward pass).
template <typename Dtype>
struct abs_updateOutput_no_reduce_functor
{
  __host__ __device__ void operator()(const Dtype* x, const Dtype* y, Dtype *out)
  {
    const Dtype diff = *x - *y;
    *out = (diff < 0) ? -diff : diff;
  }
};
// Writes d|x - y|/dx = sign(*x - *y) into *gradInput (non-reduced backward
// pass). Note the >= comparison yields +1 when *x == *y.
template <typename Dtype>
struct abs_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *gradInput)
{
*gradInput = ScalarConvert<int, Dtype>::to(*x >= *y ? 1 : -1);
}
};
// Gradient functor for the reduced AbsCriterion:
// sign(x - y) * norm * gradOutput, where norm carries any size averaging.
template <typename Dtype>
struct abs_updateGradInput_functor
{
  const Dtype norm;
  const Dtype gradOutput;
  abs_updateGradInput_functor(Dtype norm_, Dtype gradOutput_)
    : norm(norm_), gradOutput(gradOutput_)
  {}
  __host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const
  {
    // (x - y) >= 0 resolves ties (x == y) toward +norm, matching the sign
    // convention of the non-reduced variant above.
    const Dtype signedNorm = ((x - y) >= 0) ? norm : -norm;
    return signedNorm * gradOutput;
  }
};
#include "generic/AbsCriterion.cu"
#include "THCGenerateFloatTypes.h"
|
d982116a98a165e7ea66a4a8120f2d30081c99d7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// TSP solver tests
// Author: Hugo Linsenmaier hlinsenmaier@nvidia.com
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// TSP solver tests
// Author: Hugo Linsenmaier hlinsenmaier@nvidia.com
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph.hpp>
#include <hip/hip_runtime_api.h>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <fstream>
#include <set>
#include <vector>
// One TSP test case: path to a TSPLIB instance plus its known reference
// (optimal) tour cost used to bound the solver's approximation error.
typedef struct Tsp_Usecase_t {
std::string tsp_file;
float ref_cost;
Tsp_Usecase_t(const std::string& a, const float c)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
tsp_file = rapidsDatasetRootDir + "/" + a;
} else {
tsp_file = a;
}
ref_cost = c;
}
// NOTE(review): identical to the compiler-generated copy assignment;
// kept only for explicitness.
Tsp_Usecase_t& operator=(const Tsp_Usecase_t& rhs)
{
tsp_file = rhs.tsp_file;
ref_cost = rhs.ref_cost;
return *this;
}
} Tsp_Usecase;
static std::vector<Tsp_Usecase_t> euc_2d{
{"tsplib/datasets/a280.tsp", 2579}, {"tsplib/datasets/berlin52.tsp", 7542},
{"tsplib/datasets/bier127.tsp", 118282}, {"tsplib/datasets/ch130.tsp", 6110},
{"tsplib/datasets/ch150.tsp", 6528}, {"tsplib/datasets/d1291.tsp", 50801},
{"tsplib/datasets/d1655.tsp", 62128}, {"tsplib/datasets/d198.tsp", 15780},
{"tsplib/datasets/d2103.tsp", 80450}, {"tsplib/datasets/d493.tsp", 35002},
{"tsplib/datasets/d657.tsp", 48912}, {"tsplib/datasets/eil101.tsp", 629},
{"tsplib/datasets/eil51.tsp", 426}, {"tsplib/datasets/eil76.tsp", 538},
{"tsplib/datasets/fl1400.tsp", 20127}, {"tsplib/datasets/fl1577.tsp", 22249},
{"tsplib/datasets/fl417.tsp", 11861}, {"tsplib/datasets/gil262.tsp", 2378},
{"tsplib/datasets/kroA100.tsp", 21282}, {"tsplib/datasets/kroA150.tsp", 26524},
{"tsplib/datasets/kroA200.tsp", 29368}, {"tsplib/datasets/kroB100.tsp", 22141},
{"tsplib/datasets/kroB150.tsp", 26130}, {"tsplib/datasets/kroB200.tsp", 29437},
{"tsplib/datasets/kroC100.tsp", 20749}, {"tsplib/datasets/kroD100.tsp", 21294},
{"tsplib/datasets/kroE100.tsp", 22068}, {"tsplib/datasets/lin105.tsp", 14379},
{"tsplib/datasets/lin318.tsp", 42029}, {"tsplib/datasets/nrw1379.tsp", 56638},
{"tsplib/datasets/p654.tsp", 34643}, {"tsplib/datasets/pcb1173.tsp", 56892},
{"tsplib/datasets/pcb442.tsp", 50778}, {"tsplib/datasets/pr1002.tsp", 259045},
{"tsplib/datasets/pr107.tsp", 44303}, {"tsplib/datasets/pr136.tsp", 96772},
{"tsplib/datasets/pr144.tsp", 58537}, {"tsplib/datasets/pr152.tsp", 73682},
{"tsplib/datasets/pr226.tsp", 80369}, {"tsplib/datasets/pr264.tsp", 49135},
{"tsplib/datasets/pr299.tsp", 48191}, {"tsplib/datasets/pr439.tsp", 107217},
{"tsplib/datasets/pr76.tsp", 108159}, {"tsplib/datasets/rat195.tsp", 2323},
{"tsplib/datasets/rat575.tsp", 6773}, {"tsplib/datasets/rat783.tsp", 8806},
{"tsplib/datasets/rat99.tsp", 1211}, {"tsplib/datasets/rd100.tsp", 7910},
{"tsplib/datasets/rd400.tsp", 15281}, {"tsplib/datasets/rl1323.tsp", 270199},
{"tsplib/datasets/st70.tsp", 675}, {"tsplib/datasets/ts225.tsp", 126643},
{"tsplib/datasets/tsp225.tsp", 3916}, {"tsplib/datasets/u1060.tsp", 224094},
{"tsplib/datasets/u1432.tsp", 152970}, {"tsplib/datasets/u159.tsp", 42080},
{"tsplib/datasets/u574.tsp", 36905}, {"tsplib/datasets/u724.tsp", 41910},
{"tsplib/datasets/vm1084.tsp", 239297},
};
// Parsed TSPLIB instance: city ids and their 2D coordinates, stored as
// parallel arrays (cities[i] is located at (x_pos[i], y_pos[i])).
struct Route {
std::vector<int> cities;
std::vector<float> x_pos;
std::vector<float> y_pos;
};
// Parameterized GTest fixture: loads a TSPLIB instance, runs cugraph's
// traveling_salesperson solver on the GPU, and checks that (a) the tour cost
// is within 10% of the known reference cost and (b) the returned route is a
// valid permutation of the vertices.
class Tests_Tsp : public ::testing::TestWithParam<Tsp_Usecase> {
public:
Tests_Tsp() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// Runs the solver for one test case and performs all assertions.
void run_current_test(const Tsp_Usecase& param)
{
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
// NOTE(review): test_id is built but never used afterwards.
std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
std::string(test_info->name()) + std::string("_") +
cugraph::test::getFileName(param.tsp_file) + std::string("_") +
ss.str().c_str();
// Relative tolerance on the tour cost vs. the reference optimum.
float tol = 1E-1f;
HighResClock hr_clock;
double time_tmp;
Route input;
std::cout << "File: " << param.tsp_file.c_str() << "\n";
int nodes = load_tsp(param.tsp_file.c_str(), &input);
// Device alloc
raft::handle_t const handle;
auto stream = handle.get_stream();
rmm::device_uvector<int> vertices(static_cast<size_t>(nodes), stream);
rmm::device_uvector<int> route(static_cast<size_t>(nodes), stream);
rmm::device_uvector<float> x_pos(static_cast<size_t>(nodes), stream);
rmm::device_uvector<float> y_pos(static_cast<size_t>(nodes), stream);
int* vtx_ptr = vertices.data();
int* d_route = route.data();
float* d_x_pos = x_pos.data();
float* d_y_pos = y_pos.data();
// Copy the instance (vertex ids and coordinates) to the device.
CUDA_TRY(hipMemcpy(vtx_ptr, input.cities.data(), sizeof(int) * nodes, hipMemcpyHostToDevice));
CUDA_TRY(
hipMemcpy(d_x_pos, input.x_pos.data(), sizeof(float) * nodes, hipMemcpyHostToDevice));
CUDA_TRY(
hipMemcpy(d_y_pos, input.y_pos.data(), sizeof(float) * nodes, hipMemcpyHostToDevice));
// Default parameters
int restarts = 4096;
bool beam_search = true;
int k = 4;
int nstart = 0;
bool verbose = false;
// Time the solver, bracketing it with profiler start/stop markers.
hr_clock.start();
hipDeviceSynchronize();
hipProfilerStart();
float final_cost = cugraph::traveling_salesperson(
handle, vtx_ptr, d_x_pos, d_y_pos, nodes, restarts, beam_search, k, nstart, verbose, d_route);
hipProfilerStop();
hipDeviceSynchronize();
hr_clock.stop(&time_tmp);
std::vector<int> h_route;
h_route.resize(nodes);
std::vector<int> h_vertices;
h_vertices.resize(nodes);
CUDA_TRY(hipMemcpy(h_route.data(), d_route, sizeof(int) * nodes, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
CUDA_TRY(hipMemcpy(h_vertices.data(), vtx_ptr, sizeof(int) * nodes, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
std::cout << "tsp_time: " << time_tmp << " us" << std::endl;
std::cout << "Ref cost is: " << param.ref_cost << "\n";
std::cout << "Final cost is: " << final_cost << "\n";
// Relative approximation error against the reference optimum.
float err = fabs(final_cost - param.ref_cost);
err /= param.ref_cost;
std::cout << "Approximation error is: " << err * 100 << "%\n";
EXPECT_LE(err, tol);
// Check route goes through each vertex once
size_t u_nodes = nodes;
std::set<int> node_set(h_route.begin(), h_route.end());
ASSERT_EQ(node_set.size(), u_nodes);
// Bound check
int max = *std::max_element(h_vertices.begin(), h_vertices.end());
int min = *std::min_element(h_vertices.begin(), h_vertices.end());
EXPECT_GE(*node_set.begin(), min);
EXPECT_LE(*node_set.rbegin(), max);
}
private:
// Splits s on delimiter, dropping empty tokens.
std::vector<std::string> split(const std::string& s, char delimiter)
{
std::vector<std::string> tokens;
std::string token;
std::istringstream tokenStream(s);
while (std::getline(tokenStream, token, delimiter)) {
if (token.size() == 0) continue;
tokens.push_back(token);
}
return tokens;
}
// FIXME: At the moment TSP does not accept a graph_t as input and therefore
// deviates from the standard testing I/O pattern. Once other input types
// are supported we want to reconcile TSP testing with the rest of cugraph.
// Parses a TSPLIB file: header lines ("KEY : VALUE") until the coordinate
// section, then one "id x y" triple per line. Returns the declared node count.
int load_tsp(const char* fname, Route* input)
{
std::fstream fs;
fs.open(fname);
std::string line;
std::vector<std::string> tokens;
int nodes = 0;
while (std::getline(fs, line) && line.find(':') != std::string::npos) {
tokens = split(line, ':');
auto strip_token = split(tokens[0], ' ')[0];
// NOTE(review): std::stof narrows float -> int here; std::stoi would
// express the intent directly.
if (strip_token == "DIMENSION") nodes = std::stof(tokens[1]);
}
while (std::getline(fs, line) && line.find(' ') != std::string::npos) {
tokens = split(line, ' ');
auto city_id = std::stof(tokens[0]);
auto x = std::stof(tokens[1]);
auto y = std::stof(tokens[2]);
input->cities.push_back(city_id);
input->x_pos.push_back(x);
input->y_pos.push_back(y);
}
fs.close();
assert(nodes == input->cities.size());
return nodes;
}
};
TEST_P(Tests_Tsp, CheckFP32_T) { run_current_test(GetParam()); }
INSTANTIATE_TEST_SUITE_P(simple_test, Tests_Tsp, ::testing::ValuesIn(euc_2d));
CUGRAPH_TEST_PROGRAM_MAIN()
| d982116a98a165e7ea66a4a8120f2d30081c99d7.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// TSP solver tests
// Author: Hugo Linsenmaier hlinsenmaier@nvidia.com
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// TSP solver tests
// Author: Hugo Linsenmaier hlinsenmaier@nvidia.com
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph.hpp>
#include <cuda_profiler_api.h>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <fstream>
#include <set>
#include <vector>
// One TSP test case: path to a TSPLIB instance plus its known reference
// (optimal) tour cost used to bound the solver's approximation error.
typedef struct Tsp_Usecase_t {
std::string tsp_file;
float ref_cost;
Tsp_Usecase_t(const std::string& a, const float c)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
tsp_file = rapidsDatasetRootDir + "/" + a;
} else {
tsp_file = a;
}
ref_cost = c;
}
// NOTE(review): identical to the compiler-generated copy assignment;
// kept only for explicitness.
Tsp_Usecase_t& operator=(const Tsp_Usecase_t& rhs)
{
tsp_file = rhs.tsp_file;
ref_cost = rhs.ref_cost;
return *this;
}
} Tsp_Usecase;
static std::vector<Tsp_Usecase_t> euc_2d{
{"tsplib/datasets/a280.tsp", 2579}, {"tsplib/datasets/berlin52.tsp", 7542},
{"tsplib/datasets/bier127.tsp", 118282}, {"tsplib/datasets/ch130.tsp", 6110},
{"tsplib/datasets/ch150.tsp", 6528}, {"tsplib/datasets/d1291.tsp", 50801},
{"tsplib/datasets/d1655.tsp", 62128}, {"tsplib/datasets/d198.tsp", 15780},
{"tsplib/datasets/d2103.tsp", 80450}, {"tsplib/datasets/d493.tsp", 35002},
{"tsplib/datasets/d657.tsp", 48912}, {"tsplib/datasets/eil101.tsp", 629},
{"tsplib/datasets/eil51.tsp", 426}, {"tsplib/datasets/eil76.tsp", 538},
{"tsplib/datasets/fl1400.tsp", 20127}, {"tsplib/datasets/fl1577.tsp", 22249},
{"tsplib/datasets/fl417.tsp", 11861}, {"tsplib/datasets/gil262.tsp", 2378},
{"tsplib/datasets/kroA100.tsp", 21282}, {"tsplib/datasets/kroA150.tsp", 26524},
{"tsplib/datasets/kroA200.tsp", 29368}, {"tsplib/datasets/kroB100.tsp", 22141},
{"tsplib/datasets/kroB150.tsp", 26130}, {"tsplib/datasets/kroB200.tsp", 29437},
{"tsplib/datasets/kroC100.tsp", 20749}, {"tsplib/datasets/kroD100.tsp", 21294},
{"tsplib/datasets/kroE100.tsp", 22068}, {"tsplib/datasets/lin105.tsp", 14379},
{"tsplib/datasets/lin318.tsp", 42029}, {"tsplib/datasets/nrw1379.tsp", 56638},
{"tsplib/datasets/p654.tsp", 34643}, {"tsplib/datasets/pcb1173.tsp", 56892},
{"tsplib/datasets/pcb442.tsp", 50778}, {"tsplib/datasets/pr1002.tsp", 259045},
{"tsplib/datasets/pr107.tsp", 44303}, {"tsplib/datasets/pr136.tsp", 96772},
{"tsplib/datasets/pr144.tsp", 58537}, {"tsplib/datasets/pr152.tsp", 73682},
{"tsplib/datasets/pr226.tsp", 80369}, {"tsplib/datasets/pr264.tsp", 49135},
{"tsplib/datasets/pr299.tsp", 48191}, {"tsplib/datasets/pr439.tsp", 107217},
{"tsplib/datasets/pr76.tsp", 108159}, {"tsplib/datasets/rat195.tsp", 2323},
{"tsplib/datasets/rat575.tsp", 6773}, {"tsplib/datasets/rat783.tsp", 8806},
{"tsplib/datasets/rat99.tsp", 1211}, {"tsplib/datasets/rd100.tsp", 7910},
{"tsplib/datasets/rd400.tsp", 15281}, {"tsplib/datasets/rl1323.tsp", 270199},
{"tsplib/datasets/st70.tsp", 675}, {"tsplib/datasets/ts225.tsp", 126643},
{"tsplib/datasets/tsp225.tsp", 3916}, {"tsplib/datasets/u1060.tsp", 224094},
{"tsplib/datasets/u1432.tsp", 152970}, {"tsplib/datasets/u159.tsp", 42080},
{"tsplib/datasets/u574.tsp", 36905}, {"tsplib/datasets/u724.tsp", 41910},
{"tsplib/datasets/vm1084.tsp", 239297},
};
// Parsed TSPLIB instance: city ids and their 2D coordinates, stored as
// parallel arrays (cities[i] is located at (x_pos[i], y_pos[i])).
struct Route {
std::vector<int> cities;
std::vector<float> x_pos;
std::vector<float> y_pos;
};
// Parameterized GTest fixture: loads a TSPLIB instance, runs cugraph's
// traveling_salesperson solver on the GPU, and checks that (a) the tour cost
// is within 10% of the known reference cost and (b) the returned route is a
// valid permutation of the vertices.
class Tests_Tsp : public ::testing::TestWithParam<Tsp_Usecase> {
 public:
  Tests_Tsp() {}
  static void SetupTestCase() {}
  static void TearDownTestCase() {}
  virtual void SetUp() {}
  virtual void TearDown() {}
  // Runs the solver for one test case and performs all assertions.
  void run_current_test(const Tsp_Usecase& param)
  {
    const ::testing::TestInfo* const test_info =
      ::testing::UnitTest::GetInstance()->current_test_info();
    std::stringstream ss;
    // NOTE(review): test_id is built but never used afterwards.
    std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
                          std::string(test_info->name()) + std::string("_") +
                          cugraph::test::getFileName(param.tsp_file) + std::string("_") +
                          ss.str().c_str();
    // Relative tolerance on the tour cost vs. the reference optimum.
    float tol = 1E-1f;
    HighResClock hr_clock;
    double time_tmp;
    Route input;
    std::cout << "File: " << param.tsp_file.c_str() << "\n";
    int nodes = load_tsp(param.tsp_file.c_str(), &input);
    // Device alloc
    raft::handle_t const handle;
    auto stream = handle.get_stream();
    rmm::device_uvector<int> vertices(static_cast<size_t>(nodes), stream);
    rmm::device_uvector<int> route(static_cast<size_t>(nodes), stream);
    rmm::device_uvector<float> x_pos(static_cast<size_t>(nodes), stream);
    rmm::device_uvector<float> y_pos(static_cast<size_t>(nodes), stream);
    int* vtx_ptr = vertices.data();
    int* d_route = route.data();
    float* d_x_pos = x_pos.data();
    float* d_y_pos = y_pos.data();
    // Copy the instance (vertex ids and coordinates) to the device.
    CUDA_TRY(cudaMemcpy(vtx_ptr, input.cities.data(), sizeof(int) * nodes, cudaMemcpyHostToDevice));
    CUDA_TRY(
      cudaMemcpy(d_x_pos, input.x_pos.data(), sizeof(float) * nodes, cudaMemcpyHostToDevice));
    CUDA_TRY(
      cudaMemcpy(d_y_pos, input.y_pos.data(), sizeof(float) * nodes, cudaMemcpyHostToDevice));
    // Default parameters
    int restarts = 4096;
    bool beam_search = true;
    int k = 4;
    int nstart = 0;
    bool verbose = false;
    // Time the solver, bracketing it with profiler start/stop markers.
    hr_clock.start();
    cudaDeviceSynchronize();
    cudaProfilerStart();
    float final_cost = cugraph::traveling_salesperson(
      handle, vtx_ptr, d_x_pos, d_y_pos, nodes, restarts, beam_search, k, nstart, verbose, d_route);
    cudaProfilerStop();
    cudaDeviceSynchronize();
    hr_clock.stop(&time_tmp);
    std::vector<int> h_route;
    h_route.resize(nodes);
    std::vector<int> h_vertices;
    h_vertices.resize(nodes);
    CUDA_TRY(cudaMemcpy(h_route.data(), d_route, sizeof(int) * nodes, cudaMemcpyDeviceToHost));
    cudaDeviceSynchronize();
    CUDA_TRY(cudaMemcpy(h_vertices.data(), vtx_ptr, sizeof(int) * nodes, cudaMemcpyDeviceToHost));
    cudaDeviceSynchronize();
    std::cout << "tsp_time: " << time_tmp << " us" << std::endl;
    std::cout << "Ref cost is: " << param.ref_cost << "\n";
    std::cout << "Final cost is: " << final_cost << "\n";
    // Relative approximation error against the reference optimum.
    float err = fabs(final_cost - param.ref_cost);
    err /= param.ref_cost;
    std::cout << "Approximation error is: " << err * 100 << "%\n";
    EXPECT_LE(err, tol);
    // Check route goes through each vertex once
    size_t u_nodes = nodes;
    std::set<int> node_set(h_route.begin(), h_route.end());
    ASSERT_EQ(node_set.size(), u_nodes);
    // Bound check
    int max = *std::max_element(h_vertices.begin(), h_vertices.end());
    int min = *std::min_element(h_vertices.begin(), h_vertices.end());
    EXPECT_GE(*node_set.begin(), min);
    EXPECT_LE(*node_set.rbegin(), max);
  }
 private:
  // Splits s on delimiter, dropping empty tokens.
  std::vector<std::string> split(const std::string& s, char delimiter)
  {
    std::vector<std::string> tokens;
    std::string token;
    std::istringstream tokenStream(s);
    while (std::getline(tokenStream, token, delimiter)) {
      if (token.size() == 0) continue;
      tokens.push_back(token);
    }
    return tokens;
  }
  // FIXME: At the moment TSP does not accept a graph_t as input and therefore
  // deviates from the standard testing I/O pattern. Once other input types
  // are supported we want to reconcile TSP testing with the rest of cugraph.
  // Parses a TSPLIB file: header lines ("KEY : VALUE") until the coordinate
  // section, then one "id x y" triple per line. Returns the declared node count.
  int load_tsp(const char* fname, Route* input)
  {
    std::fstream fs;
    fs.open(fname);
    std::string line;
    std::vector<std::string> tokens;
    int nodes = 0;
    while (std::getline(fs, line) && line.find(':') != std::string::npos) {
      tokens = split(line, ':');
      auto strip_token = split(tokens[0], ' ')[0];
      // NOTE(review): std::stof narrows float -> int here; std::stoi would
      // express the intent directly.
      if (strip_token == "DIMENSION") nodes = std::stof(tokens[1]);
    }
    while (std::getline(fs, line) && line.find(' ') != std::string::npos) {
      tokens = split(line, ' ');
      auto city_id = std::stof(tokens[0]);
      auto x = std::stof(tokens[1]);
      auto y = std::stof(tokens[2]);
      input->cities.push_back(city_id);
      input->x_pos.push_back(x);
      input->y_pos.push_back(y);
    }
    fs.close();
    assert(nodes == input->cities.size());
    return nodes;
  }
};
INSTANTIATE_TEST_SUITE_P(simple_test, Tests_Tsp, ::testing::ValuesIn(euc_2d));
CUGRAPH_TEST_PROGRAM_MAIN()
|
871a501fdeb28ca4b11420746b5c2182c365f1cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#define SECTION_SIZE 32
using namespace std;
// In-place inclusive (Kogge-Stone) scan of one SECTION_SIZE-wide section of X
// into Y. Each block also writes its section total into S[blockIdx.x] so a
// second pass can propagate sums across sections.
// Assumes blockDim.x == SECTION_SIZE.
// Fixes vs. the original:
//  - the load was unguarded (`XY[tid] = X[i]`), reading past the end of X for
//    the tail block; we zero-pad instead, which also keeps the section total
//    in XY[SECTION_SIZE-1] correct;
//  - `XY[tid] += XY[tid-stride]` raced the read of XY[tid-stride] against its
//    concurrent update by another thread, and the per-thread loop bound
//    (`stride <= threadIdx.x`) placed __syncthreads() in divergent control
//    flow (undefined behavior). All threads now iterate the same number of
//    times with a read/barrier/write split.
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
  __shared__ float XY[SECTION_SIZE];
  int i = blockIdx.x*blockDim.x+ threadIdx.x;
  XY[threadIdx.x] = (i < InputSize) ? X[i] : 0.0f;
  for (unsigned int stride = 1; stride < SECTION_SIZE; stride *= 2) {
    __syncthreads();
    float partial = XY[threadIdx.x];
    if (threadIdx.x >= stride) partial += XY[threadIdx.x - stride];
    __syncthreads();
    XY[threadIdx.x] = partial;
  }
  if (i < InputSize) Y[i] = XY[threadIdx.x];
  __syncthreads();
  if (threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // section total
}
// Adds S[b] -- the inclusive scan of section totals -- to every element of
// section b+1 of Y, turning independent per-section scans into one global
// inclusive scan. Block b of this launch updates section b+1; the bounds
// guard makes threads past the end of Y a no-op.
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
// Computes an inclusive prefix sum of an n-element vector of ones on the GPU
// and prints the result (element i should print i+1).
// Limitation: the two-level scan handles at most SECTION_SIZE * SECTION_SIZE
// elements, since the section totals are scanned by a single block.
int main(){
  int n;
  cin >> n;
  int size = n*sizeof(float);
  float *X_h = (float *) malloc( size ); // allocate CPU memory
  float *Y_h = (float *) malloc( size );
  for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
  float *X, *Y, *S, *S1;
  hipMalloc(&X, size); // allocate GPU memory
  hipMalloc(&Y, size);
  hipMemcpy(X, X_h, size, hipMemcpyHostToDevice);
  int BLOCK_SIZE = SECTION_SIZE;
  // Integer ceiling division. The old `ceil(n/BLOCK_SIZE)` truncated in the
  // integer division first, so the tail section was silently dropped whenever
  // n was not a multiple of BLOCK_SIZE.
  int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
  int size_s = GRID_SIZE*sizeof(float);
  hipMalloc(&S, size_s);
  hipMalloc(&S1, size_s);
  // Pass 1: scan each section independently; section totals land in S.
  hipLaunchKernelGGL(( Prefix_sum_oneblock_kernel), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, X, Y, n, S);
  // Pass 2: scan the GRID_SIZE section totals in place with a single block.
  // The old call launched GRID_SIZE blocks over `n` elements, reading far
  // past the end of the GRID_SIZE-element buffer S.
  hipLaunchKernelGGL(( Prefix_sum_oneblock_kernel), dim3(1),dim3(BLOCK_SIZE), 0, 0, S, S, GRID_SIZE, S1);
  // Pass 3: add each scanned section total to the following section.
  hipLaunchKernelGGL(( Add_scalar_to_subvector), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, Y, S, n);
  hipMemcpy(Y_h, Y, size, hipMemcpyDeviceToHost);
  for(int i=0; i<n; i++){
  cout<<i<<" "<<Y_h[i]<<endl;
  }
  hipFree(X); hipFree(Y);
  hipFree(S); hipFree(S1); // these were previously leaked
  free(X_h); free(Y_h);
}
| 871a501fdeb28ca4b11420746b5c2182c365f1cc.cu | #include<iostream>
#define SECTION_SIZE 32
using namespace std;
// In-place inclusive (Kogge-Stone) scan of one SECTION_SIZE-wide section of X
// into Y. Each block also writes its section total into S[blockIdx.x] so a
// second pass can propagate sums across sections.
// Assumes blockDim.x == SECTION_SIZE.
// Fixes vs. the original:
//  - the load was unguarded (`XY[tid] = X[i]`), reading past the end of X for
//    the tail block; we zero-pad instead, which also keeps the section total
//    in XY[SECTION_SIZE-1] correct;
//  - `XY[tid] += XY[tid-stride]` raced the read of XY[tid-stride] against its
//    concurrent update by another thread, and the per-thread loop bound
//    (`stride <= threadIdx.x`) placed __syncthreads() in divergent control
//    flow (undefined behavior). All threads now iterate the same number of
//    times with a read/barrier/write split.
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
  __shared__ float XY[SECTION_SIZE];
  int i = blockIdx.x*blockDim.x+ threadIdx.x;
  XY[threadIdx.x] = (i < InputSize) ? X[i] : 0.0f;
  for (unsigned int stride = 1; stride < SECTION_SIZE; stride *= 2) {
    __syncthreads();
    float partial = XY[threadIdx.x];
    if (threadIdx.x >= stride) partial += XY[threadIdx.x - stride];
    __syncthreads();
    XY[threadIdx.x] = partial;
  }
  if (i < InputSize) Y[i] = XY[threadIdx.x];
  __syncthreads();
  if (threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // section total
}
// Adds S[b] -- the inclusive scan of section totals -- to every element of
// section b+1 of Y, turning independent per-section scans into one global
// inclusive scan. Block b of this launch updates section b+1; the bounds
// guard makes threads past the end of Y a no-op.
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
// Computes an inclusive prefix sum of an n-element vector of ones on the GPU
// and prints the result (element i should print i+1).
// Limitation: the two-level scan handles at most SECTION_SIZE * SECTION_SIZE
// elements, since the section totals are scanned by a single block.
int main(){
  int n;
  cin >> n;
  int size = n*sizeof(float);
  float *X_h = (float *) malloc( size ); // allocate CPU memory
  float *Y_h = (float *) malloc( size );
  for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
  float *X, *Y, *S, *S1;
  cudaMalloc(&X, size); // allocate GPU memory
  cudaMalloc(&Y, size);
  cudaMemcpy(X, X_h, size, cudaMemcpyHostToDevice);
  int BLOCK_SIZE = SECTION_SIZE;
  // Integer ceiling division. The old `ceil(n/BLOCK_SIZE)` truncated in the
  // integer division first, so the tail section was silently dropped whenever
  // n was not a multiple of BLOCK_SIZE.
  int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
  int size_s = GRID_SIZE*sizeof(float);
  cudaMalloc(&S, size_s);
  cudaMalloc(&S1, size_s);
  // Pass 1: scan each section independently; section totals land in S.
  Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
  // Pass 2: scan the GRID_SIZE section totals in place with a single block.
  // The old call launched GRID_SIZE blocks over `n` elements, reading far
  // past the end of the GRID_SIZE-element buffer S.
  Prefix_sum_oneblock_kernel<<<1,BLOCK_SIZE>>> (S, S, GRID_SIZE, S1);
  // Pass 3: add each scanned section total to the following section.
  Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
  cudaMemcpy(Y_h, Y, size, cudaMemcpyDeviceToHost);
  for(int i=0; i<n; i++){
  cout<<i<<" "<<Y_h[i]<<endl;
  }
  cudaFree(X); cudaFree(Y);
  cudaFree(S); cudaFree(S1); // these were previously leaked
  free(X_h); free(Y_h);
}
|
48c9dc71d4c3eb9f39785ad43ee0441c250abd7d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 64>
// Explicit instantiations of the GPU driver templates for the
// union-of-64-spheres shape, so their definitions are emitted into this
// translation unit.
// Free-volume estimation driver
template hipError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 64> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
// Monte Carlo trial-move update driver
template hipError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 64> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
// Implicit-depletant overlap counting driver
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 64> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
// Implicit-depletant accept/reject driver
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 64> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 48c9dc71d4c3eb9f39785ad43ee0441c250abd7d.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 64>
// Explicit instantiations of the GPU driver templates for the
// union-of-64-spheres shape, so their definitions are emitted into this
// translation unit.
// Free-volume estimation driver
template cudaError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 64> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
// Monte Carlo trial-move update driver
template cudaError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 64> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
// Implicit-depletant overlap counting driver
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 64> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
// Implicit-depletant accept/reject driver
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 64> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 64> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
0720e66d5215d395a56fd8ec971688ba11be7293.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math_constants.h>
#define OP_SUM 0
#define OP_MULT 1
#define MH_ADD 0
#define MH_DEL 1
#define MH_NOP 2
const int B = %(B)s; // blockDim.x
/**
* Helper function to sum across a block.
* Assume pS_data is already in shared memory
* Only the first thread returns a value in pSum
*/
// Block-wide tree reduction over pSdata (already in shared memory).
// op selects the combiner (OP_SUM or OP_MULT); unknown ops leave values
// unchanged.  Only thread 0 writes the result to pSum[0].
// Assumes blockDim.x*blockDim.y is a power of two and <= B — TODO confirm
// at call sites.
__device__ void reduceBlock( float pSdata[B], float* pSum, int op )
{
int idx = threadIdx.x * blockDim.y + threadIdx.y;
// Make every thread's contribution to pSdata visible.
__syncthreads();
// The barrier inside the loop must be reached by ALL threads of the
// block, so the loop runs in every thread and only the combine step is
// guarded.  (The original wrapped the whole loop in `if (idx < half)`,
// which makes __syncthreads() divergent -> undefined behaviour.)
for (int half = (blockDim.x*blockDim.y) / 2; half > 0; half /= 2)
{
if (idx < half)
{
switch(op)
{
case OP_SUM:
pSdata[idx] = pSdata[idx] + pSdata[idx + half];
break;
case OP_MULT:
pSdata[idx] = pSdata[idx] * pSdata[idx + half];
break;
default:
// unknown op: identity (leave the value in place)
break;
}
}
__syncthreads();
}
// Thread 0 publishes the block-wide result.
if (idx == 0)
{
pSum[0] = pSdata[0];
}
}
/**
* Helper function to sum across a block.
* Assume pS_data is already in shared memory
* Only the first thread returns a value in pSum
*/
// Double-precision variant of reduceBlock: block-wide tree reduction over
// pSdata (already in shared memory).  op selects the combiner (OP_SUM or
// OP_MULT).  Only thread 0 writes the result to pSum[0].
// Assumes blockDim.x*blockDim.y is a power of two and <= B — TODO confirm
// at call sites.
__device__ void reduceBlockDouble( double pSdata[B], double* pSum, int op )
{
int idx = threadIdx.x * blockDim.y + threadIdx.y;
// Make every thread's contribution to pSdata visible.
__syncthreads();
// The barrier inside the loop must be reached by ALL threads of the
// block, so the loop runs in every thread and only the combine step is
// guarded.  (The original wrapped the whole loop in `if (idx < half)`,
// which makes __syncthreads() divergent -> undefined behaviour.)
for (int half = (blockDim.x*blockDim.y) / 2; half > 0; half /= 2)
{
if (idx < half)
{
switch(op)
{
case OP_SUM:
pSdata[idx] = pSdata[idx] + pSdata[idx + half];
break;
case OP_MULT:
pSdata[idx] = pSdata[idx] * pSdata[idx + half];
break;
default:
// unknown op: identity (leave the value in place)
break;
}
}
__syncthreads();
}
// Thread 0 publishes the block-wide result.
if (idx == 0)
{
pSum[0] = pSdata[0];
}
}
/**
* Compute the weighted sum along columns of G as required for
* sampling new parents.
* This should be launched on a KxN/B grid of 1024x1 blocks.
*/
__global__ void computeWGSForAllSpikes(int K,
int N,
int* pC,
float* pGS,
int* pCols,
int* pRowInds,
float* pW,
bool* pA,
float* pWGS
)
{
// ki: candidate parent process, one per grid row
int ki = blockIdx.y;
// j: spike (one GS column), one per thread
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j<N)
{
// kj: process on which spike j occurred
int kj = pC[j];
int k_ind = ki*K+kj;
// W is always populated, regardless of whether or not an edge exists
// float w = pA[k_ind] ? pW[k_ind] : 0.0;
// NOTE(review): pA is unused in the active code path (only in the
// commented-out guards below) — confirm that is intended.
float w = pW[k_ind];
// If the effective weight is greater than 0, compute the weighted
// sum along the column of GS
// if (w > 0.0)
// {
float gs_sum = 0.0;
// Iterate over row entries for dS column jj
// Since dS is stored in CSC format, pCols gives the
// pointers into dS for each column
for (int gs_off=pCols[j]; gs_off<pCols[j+1]; gs_off++)
{
// only accumulate candidate parent spikes that live on process ki
if (pC[pRowInds[gs_off]]==ki)
{
gs_sum += pGS[gs_off];
}
}
// Multiply by the current weight
pWGS[ki*N+j] = w * gs_sum;
// }
// else
// {
// pWGS[ki*N+j] = 0.0;
// }
}
}
/**
* Compute the weighted sum along columns of G for a single block.
* This overrides the adjacency matrix and assumes an edge exists.
* This is necessary for computation of the Q ratio when a new edge
* is proposed.
* This should be launched on a 1xN/B grid of 1024x1 blocks.
*/
// Recompute row ki of the weighted-GS cache for a *proposed* edge
// (ki -> kj), ignoring the adjacency matrix.  Needed when evaluating the
// MH acceptance ratio for an edge that does not yet exist.
// Launch on a 1 x ceil(N/B) grid of B-thread blocks.
__global__ void computeWGSForNewEdge(int ki,
int kj,
int K,
int N,
int* pC,
float* pGS,
int* pCols,
int* pRowInds,
float* pW,
float* pWGS
)
{
int spike = blockIdx.x*blockDim.x + threadIdx.x;
// proposed edge weight, read regardless of adjacency
float weight = pW[ki*K + kj];
// only spikes on the post-synaptic process kj are affected
if (spike >= N || pC[spike] != kj)
{
return;
}
// Accumulate the GS entries of this spike's column whose parent spike
// lies on process ki.  GS is CSC: pCols[spike]..pCols[spike+1] delimits
// the column.
float acc = 0.0;
for (int off = pCols[spike]; off < pCols[spike+1]; off++)
{
if (pC[pRowInds[off]] == ki)
{
acc += pGS[off];
}
}
pWGS[ki*N + spike] = weight * acc;
}
/**
* Clear the WGS entries for a deleted edge
* This should be launched on a 1xNs[kj]/B grid of 1024x1 blocks.
*/
// Zero the cached weighted-GS entries in row ki for spikes on process kj
// after the edge (ki -> kj) has been removed.
// Launch on a 1 x ceil(N/B) grid of B-thread blocks.
__global__ void clearWGSForDeletedEdge(int ki,
int kj,
int N,
int* pC,
float* pWGS
)
{
int spike = blockDim.x * blockIdx.x + threadIdx.x;
// only spikes on the post-synaptic process kj carried this edge's mass
if (spike < N && pC[spike] == kj)
{
pWGS[ki*N + spike] = 0.0;
}
}
/**
* Compute the MH acceptance probability of adding edge (i,j)
* This should be launched on a grid of (1xceil(Ns[kj]/B)) blocks
* each with 1024 threads. The output should be a float vector
* of size (1x ceil(Ns[kj]/B))
*
* For stability we compute this in log space
*
*/
__global__ void computeProdQratio(int kj,
int K,
int N,
int* pC,
float* pWGS,
float* pLam,
int affectedRow,
int mhOp,
double* qr
)
{
int x = threadIdx.x;
int j = blockIdx.x * blockDim.x + x;
// The ratio of Q's involves a product over all spikes;
// we use shared memory to store the partial products each
// thread computes and then reduce as above
__shared__ double logQrat[B];
logQrat[x] = 0.0;
// only spikes on the post-synaptic process kj contribute
if (j<N && pC[j]==kj)
{
float lam = pLam[kj*N+j];
double wgsNum = lam; // numerator: sum over WGS's current and changed rows
double wgsDen = lam; // denominator: sum over WGS's current rows
for (int ki=0; ki<K; ki++)
{
int wgsOffset = ki*N;
if (ki != affectedRow)
{
// both numerator and denominator match
wgsNum += pWGS[wgsOffset+j];
wgsDen += pWGS[wgsOffset+j];
}
else if (mhOp == MH_ADD)
{
// WGS computed with proposed edge weight
// Current rows sum to 0 since no edge present before
wgsNum += pWGS[wgsOffset+j];
}
else
{
// Edge is being removed
// WGS was computed with current edge and
// weight does not contribute to numerator
wgsDen += pWGS[wgsOffset+j];
}
}
// NOTE(review): wgsNum/wgsDen are double but logf is single precision —
// possibly intentional; confirm.
logQrat[x] = logf(wgsNum) - logf(wgsDen);
}
// Take the product of Qrat by reduction (sum of logs)
double sumLogQrat = 0.0;
reduceBlockDouble(logQrat, &sumLogQrat, OP_SUM);
// one partial log-ratio per block; the caller sums over the qr entries
if (x==0)
{
qr[blockIdx.x] = sumLogQrat;
}
}
/**
 * Accumulate (via atomicAdd into *lr) the log-likelihood ratio over spikes
 * on process kj for adding the potential edge ki -> kj: the numerator
 * includes row ki of WGS, the denominator only the rows with pA set.
 * NOTE(review): assumes *lr was zero-initialized by the caller — confirm.
 * Launch on a 1 x ceil(N/B) grid of B-thread blocks.
 */
__global__ void computeLkhdRatioA(int ki,
int kj,
int K,
int N,
bool* pA,
int* pC,
float* pWGS,
float* pLam,
float* lr
)
{
int x = threadIdx.x;
int j = blockIdx.x * blockDim.x + x;
// The ratio involves a product over all spikes;
// we use shared memory to store the partial products each
// thread computes and then reduce as above
__shared__ float logLkhdRat[B];
logLkhdRat[x] = 0.0;
// only spikes on the post-synaptic process kj contribute
if (j<N && pC[j]==kj)
{
float lam = pLam[kj*N+j];
float wgsNum = lam; // numerator: sum over WGS's current and changed rows
float wgsDen = lam; // denominator: sum over WGS's current rows
for (int ii=0; ii<K; ii++)
{
int wgsOffset = ii*N;
if (ii != ki)
{
if (pA[ii*K+kj])
{
// both numerator and denominator match
wgsNum += pWGS[wgsOffset+j];
wgsDen += pWGS[wgsOffset+j];
}
}
else
{
// Only add WGS to the numerator for potential edge ki
wgsNum += pWGS[wgsOffset+j];
}
}
logLkhdRat[x] = logf(wgsNum) - logf(wgsDen);
}
// Take the product by reduction (sum of logs)
float sumLogLkhdRat = 0.0;
reduceBlock(logLkhdRat, &sumLogLkhdRat, OP_SUM);
// one atomic per block folds the partial sums into the global ratio
if (x==0)
{
atomicAdd(lr,sumLogLkhdRat);
}
}
/**
* Sample a random value of A[ki,kj] given the two terms
* which add up to the log odds ratio and a random variate p
*/
/**
 * Sample A[ki,kj] ~ Bernoulli(sigmoid(logpratio + *pLogqratio)) using the
 * uniform variate p.  logpratio + *pLogqratio is the log odds ratio.
 * Launch with a single thread.
 */
__global__ void sampleA(int ki,
int kj,
int K,
bool* pA,
float logpratio,
float* pLogqratio,
float p
)
{
float logit_pr_A = logpratio + (*pLogqratio);
// log sigmoid computed stably, branching on the sign of the logit so the
// exponential never overflows:
//   x <  0: log sigma(x) =  x - log(1 + e^x)
//   x >= 0: log sigma(x) = -log(1 + e^-x)
// (The original called the double-precision exp() and used double
// literals here; keep everything in single precision.)
float log_pr_A = 0.0f;
if (logit_pr_A < 0.0f)
{
log_pr_A = logit_pr_A - logf(1.0f+expf(logit_pr_A));
}
else
{
log_pr_A = -logf(1.0f+expf(-logit_pr_A));
}
// Sample A: accept the edge iff log(p) < log Pr(A=1)
if (logf(p) < log_pr_A)
{
pA[ki*K+kj] = true;
}
else
{
pA[ki*K+kj] = false;
}
}
/**
* Assign new parents neurons for each spike on k
* Launch this on 1xceil(N/B) grid of blocks
*/
__global__ void sampleNewParentProcs(int K,
int N,
int* pC,
bool* pA,
float* pW,
float* pWGS,
float* pLam,
float* urand,
int* pZ_temp
)
{
// NOTE(review): pW is unused in this kernel (WGS already folds the weight
// in) — confirm and consider dropping the parameter.
// TODO: index using both x and y for large datasets
int j = blockIdx.x*blockDim.x + threadIdx.x;
// Sum columns of WGS, taking into account the deleted row
// in the case of an edge removal
if (j<N)
{
int kj = pC[j];
// background (Poisson) rate for this spike
float lam = pLam[kj*N+j];
// Clear whatever existing parent might exist
pZ_temp[j] = -1;
// In the first pass compute the cumulative sum.
// In the second pass we'll use this information to
// determine which process contains the parent spike.
float wgsSum = lam;
for (int ki=0; ki<K; ki++)
{
if (pA[ki*K+kj])
{
wgsSum += pWGS[ki*N+j];
}
}
// Check if this block is responsible for assigning
// a new parent based on the value of urand
float wu = urand[j] * wgsSum;
// wu <= lam: the background process parented spike j (-1 sentinel)
if (wu <= lam)
{
pZ_temp[j] = -1;
return;
}
// Second pass: walk the same cumulative sum until it crosses wu;
// that process is the sampled parent.
wgsSum = lam;
for (int ki=0; ki<K; ki++)
{
if (pA[ki*K+kj])
{
wgsSum += pWGS[ki*N+j];
}
if (wgsSum >= wu)
{
pZ_temp[j] = ki;
break;
}
}
// NOTE(review): if floating-point rounding prevents the threshold from
// being crossed, pZ_temp[j] stays -1 (treated as background) — confirm
// that fallback is acceptable.
}
}
/**
* Assign new parents Z for each spike on a given process.
* This should be called with a grid of K x ceil(N/B)) blocks,
* each of which has B threads.
* Assume that an array of Ns[kj] uniform random
* numbers between [0,1] is present in the array urand. These will
* be used to identify the parent neuron and then the parent spike.
*/
__global__ void sampleNewParentSpikes(int K,
int N,
int* pC,
int* pZ_temp,
float* pGS,
int* pRowIndices,
int* pCols,
float* pW,
float* pWGS,
int* pZ,
float* urand
)
{
// j: spike (GS column); ki: candidate parent process for this grid row
int j = blockIdx.x*blockDim.x + threadIdx.x;
int ki = blockIdx.y;
// Sum columns of WGS, taking into account the deleted row
// in the case of an edge removal
if (j<N)
{
// Find kj
int kj=pC[j];
int k_ind = ki*K + kj;
// Background parent chosen in the previous pass: propagate the -1
// sentinel (only grid row 0 needs to write it).
if (ki==0 && pZ_temp[j]==-1)
{
pZ[j] = -1;
return;
}
// Only the grid row matching the sampled parent process does the work.
if (pZ_temp[j]==ki)
{
// Check if this block is responsible for assigning
// a new parent based on the value of urand
// Update u so that in the subsequent step we can
// simply iterate over spikes
// Dividing WGS by the edge weight rescales the threshold onto the
// unweighted GS mass of this column.
// NOTE(review): divides by pW[k_ind] — confirm the weight cannot be
// zero when pZ_temp selected this row.
float gu = urand[j] * pWGS[ki*N+j]/pW[k_ind];
// Iterate over spikes on ki to find a new parent
// We can run into trouble if gu ~= 1.0. To avoid this,
// only iterate to the second to last spike.
float cumPr = 0.0;
for (int GS_off=pCols[j]; GS_off < pCols[j+1]; GS_off++)
{
if (pC[pRowIndices[GS_off]]==ki)
{
cumPr += pGS[GS_off];
// If the cumulative GS sum exceeds the threshold, this is the parent
if (cumPr > gu)
{
pZ[j] = pRowIndices[GS_off];
break;
}
}
}
}
}
}
/**
* Compute the log likelihood term corresponding to each spike.
* Namely, log of the instantaneous intensity of the process which
* parented the spike.
*/
__global__ void computeLogLkhdPerSpike(int K,
int N,
int* pC,
float* pLam,
bool* pA,
float* pW,
float* pGS,
int* pCols,
int* pRowIndices,
float* pLL
)
{
int x = threadIdx.x;
// flatten the 2-D grid into a global spike index
int n = blockIdx.y * gridDim.x * blockDim.x +
blockIdx.x * blockDim.x +
threadIdx.x;
__shared__ float ll[B];
ll[x] = 0.0;
if (n<N)
{
// TODO: Use computeWGSForAllSpikes
// Log likelihood is the log of the total rate
int c_n = pC[n];
// start from the background rate, then add each active edge's
// weighted GS contribution from this spike's column
float lambda = pLam[c_n*N+n];
for (int row=pCols[n]; row<pCols[n+1]; row++)
{
int row_ind = pRowIndices[row];
int c_row_ind = pC[row_ind];
int c_ind = c_row_ind*K + c_n;
// pA[c_ind] acts as a 0/1 mask on the edge
lambda += (pA[c_ind] * pW[c_ind] * pGS[row]);
}
ll[x] = logf(lambda);
}
else
{
ll[x] = 0.0;
}
// Sum up the log likelihoods
// NOTE(review): llSum is only written by reduceBlock for its idx==0
// thread; the code below reads it only at x==0, which matches idx==0
// only when blockDim.y == 1 — confirm the launch is 1-D.
float llSum;
reduceBlock(ll, &llSum, OP_SUM);
// First thread in the block sets the output
if (x==0)
{
int block = blockIdx.y * gridDim.x + blockIdx.x;
pLL[block] = llSum;
}
}
/**
* Compute the log likelihood term corresponding to each spike.
* Namely, log of the instantaneous intensity of the process which
* parented the spike.
*/
__global__ void computeConditionalIntensity(int K,
int Nt,
int* pC,
bool* pA,
float* pW,
float* pGS,
int* pCols,
int* pRowIndices,
float* pCond
)
{
// t: column index into GS (one per thread); c_t: target process (grid row)
int t = threadIdx.x + blockIdx.x*blockDim.x;
int c_t = blockIdx.y;
if (t < Nt)
{
// zero the output cell, then accumulate contributions from every
// active edge whose source spike appears in column t of GS
pCond[c_t*Nt+t] = 0;
// Iterate over potential spike parents
for (int row=pCols[t]; row<pCols[t+1]; row++)
{
int n = pRowIndices[row];
int c_n = pC[n];
if (pA[c_n*K+c_t])
{
pCond[c_t*Nt+t] += pW[c_n*K+c_t] * pGS[row];
}
}
}
}
| 0720e66d5215d395a56fd8ec971688ba11be7293.cu | #include <cuda.h>
#include <math_constants.h>
#define OP_SUM 0
#define OP_MULT 1
#define MH_ADD 0
#define MH_DEL 1
#define MH_NOP 2
const int B = %(B)s; // blockDim.x
/**
* Helper function to sum across a block.
* Assume pS_data is already in shared memory
* Only the first thread returns a value in pSum
*/
// Block-wide tree reduction over pSdata (already in shared memory).
// op selects the combiner (OP_SUM or OP_MULT); unknown ops leave values
// unchanged.  Only thread 0 writes the result to pSum[0].
// Assumes blockDim.x*blockDim.y is a power of two and <= B — TODO confirm
// at call sites.
__device__ void reduceBlock( float pSdata[B], float* pSum, int op )
{
int idx = threadIdx.x * blockDim.y + threadIdx.y;
// Make every thread's contribution to pSdata visible.
__syncthreads();
// The barrier inside the loop must be reached by ALL threads of the
// block, so the loop runs in every thread and only the combine step is
// guarded.  (The original wrapped the whole loop in `if (idx < half)`,
// which makes __syncthreads() divergent -> undefined behaviour.)
for (int half = (blockDim.x*blockDim.y) / 2; half > 0; half /= 2)
{
if (idx < half)
{
switch(op)
{
case OP_SUM:
pSdata[idx] = pSdata[idx] + pSdata[idx + half];
break;
case OP_MULT:
pSdata[idx] = pSdata[idx] * pSdata[idx + half];
break;
default:
// unknown op: identity (leave the value in place)
break;
}
}
__syncthreads();
}
// Thread 0 publishes the block-wide result.
if (idx == 0)
{
pSum[0] = pSdata[0];
}
}
/**
* Helper function to sum across a block.
* Assume pS_data is already in shared memory
* Only the first thread returns a value in pSum
*/
// Double-precision variant of reduceBlock: block-wide tree reduction over
// pSdata (already in shared memory).  op selects the combiner (OP_SUM or
// OP_MULT).  Only thread 0 writes the result to pSum[0].
// Assumes blockDim.x*blockDim.y is a power of two and <= B — TODO confirm
// at call sites.
__device__ void reduceBlockDouble( double pSdata[B], double* pSum, int op )
{
int idx = threadIdx.x * blockDim.y + threadIdx.y;
// Make every thread's contribution to pSdata visible.
__syncthreads();
// The barrier inside the loop must be reached by ALL threads of the
// block, so the loop runs in every thread and only the combine step is
// guarded.  (The original wrapped the whole loop in `if (idx < half)`,
// which makes __syncthreads() divergent -> undefined behaviour.)
for (int half = (blockDim.x*blockDim.y) / 2; half > 0; half /= 2)
{
if (idx < half)
{
switch(op)
{
case OP_SUM:
pSdata[idx] = pSdata[idx] + pSdata[idx + half];
break;
case OP_MULT:
pSdata[idx] = pSdata[idx] * pSdata[idx + half];
break;
default:
// unknown op: identity (leave the value in place)
break;
}
}
__syncthreads();
}
// Thread 0 publishes the block-wide result.
if (idx == 0)
{
pSum[0] = pSdata[0];
}
}
/**
* Compute the weighted sum along columns of G as required for
* sampling new parents.
* This should be launched on a KxN/B grid of 1024x1 blocks.
*/
__global__ void computeWGSForAllSpikes(int K,
int N,
int* pC,
float* pGS,
int* pCols,
int* pRowInds,
float* pW,
bool* pA,
float* pWGS
)
{
int ki = blockIdx.y;
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j<N)
{
int kj = pC[j];
int k_ind = ki*K+kj;
// W is always populated, regardless of whether or not an edge exists
// float w = pA[k_ind] ? pW[k_ind] : 0.0;
float w = pW[k_ind];
// If the effective weight is greater than 0, compute the weighted
// sum along the column of GS
// if (w > 0.0)
// {
float gs_sum = 0.0;
// Iterate over row entries for dS column jj
// Since dS is stored in CSC format, pCols gives the
// pointers into dS for each column
for (int gs_off=pCols[j]; gs_off<pCols[j+1]; gs_off++)
{
if (pC[pRowInds[gs_off]]==ki)
{
gs_sum += pGS[gs_off];
}
}
// Multiply by the current weight
pWGS[ki*N+j] = w * gs_sum;
// }
// else
// {
// pWGS[ki*N+j] = 0.0;
// }
}
}
/**
* Compute the weighted sum along columns of G for a single block.
* This overrides the adjacency matrix and assumes an edge exists.
* This is necessary for computation of the Q ratio when a new edge
* is proposed.
* This should be launched on a 1xN/B grid of 1024x1 blocks.
*/
__global__ void computeWGSForNewEdge(int ki,
int kj,
int K,
int N,
int* pC,
float* pGS,
int* pCols,
int* pRowInds,
float* pW,
float* pWGS
)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
int k_ind = ki*K + kj;
int wgsOffset = ki*N;
float w = pW[k_ind];
if (j<N && pC[j]==kj)
{
float gs_sum = 0.0;
// Iterate over row entries for dS column jj
// Since dS is stored in CSC format, pCols gives the
// pointers into dS for each column
for (int gs_off=pCols[j]; gs_off<pCols[j+1]; gs_off++)
{
if (pC[pRowInds[gs_off]]==ki)
{
gs_sum += pGS[gs_off];
}
}
// Multiply by the current weight
pWGS[wgsOffset+j] = w * gs_sum;
}
}
/**
* Clear the WGS entries for a deleted edge
* This should be launched on a 1xNs[kj]/B grid of 1024x1 blocks.
*/
__global__ void clearWGSForDeletedEdge(int ki,
int kj,
int N,
int* pC,
float* pWGS
)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
int wgsOffset = ki*N;
if (j<N && pC[j]==kj)
{
pWGS[wgsOffset+j] = 0.0;
}
}
/**
* Compute the MH acceptance probability of adding edge (i,j)
* This should be launched on a grid of (1xceil(Ns[kj]/B)) blocks
* each with 1024 threads. The output should be a float vector
* of size (1x ceil(Ns[kj]/B))
*
* For stability we compute this in log space
*
*/
__global__ void computeProdQratio(int kj,
int K,
int N,
int* pC,
float* pWGS,
float* pLam,
int affectedRow,
int mhOp,
double* qr
)
{
int x = threadIdx.x;
int j = blockIdx.x * blockDim.x + x;
// The ratio of Q's invovles a product over all spikes
// we use shared memory to store the partial products each
// thread computes and then reduce as above
__shared__ double logQrat[B];
logQrat[x] = 0.0;
if (j<N && pC[j]==kj)
{
float lam = pLam[kj*N+j];
double wgsNum = lam; // numerator: sum over WGS's current and changed rows
double wgsDen = lam; // denomenator: sum over WGS's current rows
for (int ki=0; ki<K; ki++)
{
int wgsOffset = ki*N;
if (ki != affectedRow)
{
// both numerator and denominator match
wgsNum += pWGS[wgsOffset+j];
wgsDen += pWGS[wgsOffset+j];
}
else if (mhOp == MH_ADD)
{
// WGS computed with proposed edge weight
// Current rows sum to 0 since no edge present before
wgsNum += pWGS[wgsOffset+j];
}
else
{
// Edge is being removed
// WGS was computed with current edge and
// weight does not contribute to numerator
wgsDen += pWGS[wgsOffset+j];
}
}
logQrat[x] = logf(wgsNum) - logf(wgsDen);
}
// Take the product of Qrat by reduction
double sumLogQrat = 0.0;
reduceBlockDouble(logQrat, &sumLogQrat, OP_SUM);
if (x==0)
{
qr[blockIdx.x] = sumLogQrat;
}
}
__global__ void computeLkhdRatioA(int ki,
int kj,
int K,
int N,
bool* pA,
int* pC,
float* pWGS,
float* pLam,
float* lr
)
{
int x = threadIdx.x;
int j = blockIdx.x * blockDim.x + x;
// The ratio of Q's invovles a product over all spikes
// we use shared memory to store the partial products each
// thread computes and then reduce as above
__shared__ float logLkhdRat[B];
logLkhdRat[x] = 0.0;
if (j<N && pC[j]==kj)
{
float lam = pLam[kj*N+j];
float wgsNum = lam; // numerator: sum over WGS's current and changed rows
float wgsDen = lam; // denomenator: sum over WGS's current rows
for (int ii=0; ii<K; ii++)
{
int wgsOffset = ii*N;
if (ii != ki)
{
if (pA[ii*K+kj])
{
// both numerator and denominator match
wgsNum += pWGS[wgsOffset+j];
wgsDen += pWGS[wgsOffset+j];
}
}
else
{
// Only add WGS to the numerator for potential edge ki
wgsNum += pWGS[wgsOffset+j];
}
}
logLkhdRat[x] = logf(wgsNum) - logf(wgsDen);
}
// Take the product of Qrat by reduction
float sumLogLkhdRat = 0.0;
reduceBlock(logLkhdRat, &sumLogLkhdRat, OP_SUM);
if (x==0)
{
atomicAdd(lr,sumLogLkhdRat);
}
}
/**
* Sample a random value of A[ki,kj] given the two terms
* which add up to the log odds ratio and a random variate p
*/
/**
 * Sample A[ki,kj] ~ Bernoulli(sigmoid(logpratio + *pLogqratio)) using the
 * uniform variate p.  logpratio + *pLogqratio is the log odds ratio.
 * Launch with a single thread.
 */
__global__ void sampleA(int ki,
int kj,
int K,
bool* pA,
float logpratio,
float* pLogqratio,
float p
)
{
float logit_pr_A = logpratio + (*pLogqratio);
// log sigmoid computed stably, branching on the sign of the logit so the
// exponential never overflows:
//   x <  0: log sigma(x) =  x - log(1 + e^x)
//   x >= 0: log sigma(x) = -log(1 + e^-x)
// (The original called the double-precision exp() and used double
// literals here; keep everything in single precision.)
float log_pr_A = 0.0f;
if (logit_pr_A < 0.0f)
{
log_pr_A = logit_pr_A - logf(1.0f+expf(logit_pr_A));
}
else
{
log_pr_A = -logf(1.0f+expf(-logit_pr_A));
}
// Sample A: accept the edge iff log(p) < log Pr(A=1)
if (logf(p) < log_pr_A)
{
pA[ki*K+kj] = true;
}
else
{
pA[ki*K+kj] = false;
}
}
/**
* Assign new parents neurons for each spike on k
* Launch this on 1xceil(N/B) grid of blocks
*/
__global__ void sampleNewParentProcs(int K,
int N,
int* pC,
bool* pA,
float* pW,
float* pWGS,
float* pLam,
float* urand,
int* pZ_temp
)
{
// TODO: index using both x and y for large datasets
int j = blockIdx.x*blockDim.x + threadIdx.x;
// Sum columns of WGS, taking into account the deleted row
// in the case of an edge removal
if (j<N)
{
int kj = pC[j];
float lam = pLam[kj*N+j];
// Clear whatever existing parent might exist
pZ_temp[j] = -1;
// In the first pass compute the cumulative sum.
// In the second pass we'll use this information to
// determine which process contains the parent spike.
float wgsSum = lam;
for (int ki=0; ki<K; ki++)
{
if (pA[ki*K+kj])
{
wgsSum += pWGS[ki*N+j];
}
}
// Check if this block is responsible for assigning
// a new parent based on the value of urand
float wu = urand[j] * wgsSum;
if (wu <= lam)
{
pZ_temp[j] = -1;
return;
}
wgsSum = lam;
for (int ki=0; ki<K; ki++)
{
if (pA[ki*K+kj])
{
wgsSum += pWGS[ki*N+j];
}
if (wgsSum >= wu)
{
pZ_temp[j] = ki;
break;
}
}
}
}
/**
* Assign new parents Z for each spike on a given process.
* This should be called with a grid of K x ceil(N/B)) blocks,
* each of which has B threads.
* Assume that an array of Ns[kj] uniform random
* numbers between [0,1] is present in the array urand. These will
* be used to identify the parent neuron and then the parent spike.
*/
__global__ void sampleNewParentSpikes(int K,
int N,
int* pC,
int* pZ_temp,
float* pGS,
int* pRowIndices,
int* pCols,
float* pW,
float* pWGS,
int* pZ,
float* urand
)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
int ki = blockIdx.y;
// Sum columns of WGS, taking into account the deleted row
// in the case of an edge removal
if (j<N)
{
// Find kj
int kj=pC[j];
int k_ind = ki*K + kj;
// Clear whatever existing parent might exist
if (ki==0 && pZ_temp[j]==-1)
{
pZ[j] = -1;
return;
}
if (pZ_temp[j]==ki)
{
// Check if this block is responsible for assigning
// a new parent based on the value of urand
// Update u so that in the subsequent step we can
// simply iterate over spikes
float gu = urand[j] * pWGS[ki*N+j]/pW[k_ind];
// Iterate over spikes on ki to find a new parent
// We can run into trouble if gu ~= 1.0. To avoid this,
// only iterate to the second to last spike.
float cumPr = 0.0;
for (int GS_off=pCols[j]; GS_off < pCols[j+1]; GS_off++)
{
if (pC[pRowIndices[GS_off]]==ki)
{
cumPr += pGS[GS_off];
// If the cumulative GS sum exceeds the threshold, this is the parent
if (cumPr > gu)
{
pZ[j] = pRowIndices[GS_off];
break;
}
}
}
}
}
}
/**
* Compute the log likelihood term corresponding to each spike.
* Namely, log of the instantaneous intensity of the process which
* parented the spike.
*/
__global__ void computeLogLkhdPerSpike(int K,
int N,
int* pC,
float* pLam,
bool* pA,
float* pW,
float* pGS,
int* pCols,
int* pRowIndices,
float* pLL
)
{
int x = threadIdx.x;
int n = blockIdx.y * gridDim.x * blockDim.x +
blockIdx.x * blockDim.x +
threadIdx.x;
__shared__ float ll[B];
ll[x] = 0.0;
if (n<N)
{
// TODO: Use computeWGSForAllSpikes
// Log likelihood is the log of the total rate
int c_n = pC[n];
float lambda = pLam[c_n*N+n];
for (int row=pCols[n]; row<pCols[n+1]; row++)
{
int row_ind = pRowIndices[row];
int c_row_ind = pC[row_ind];
int c_ind = c_row_ind*K + c_n;
lambda += (pA[c_ind] * pW[c_ind] * pGS[row]);
}
ll[x] = logf(lambda);
}
else
{
ll[x] = 0.0;
}
// Sum up the log likelihoods
float llSum;
reduceBlock(ll, &llSum, OP_SUM);
// First thread in the block sets the output
if (x==0)
{
int block = blockIdx.y * gridDim.x + blockIdx.x;
pLL[block] = llSum;
}
}
/**
* Compute the log likelihood term corresponding to each spike.
* Namely, log of the instantaneous intensity of the process which
* parented the spike.
*/
__global__ void computeConditionalIntensity(int K,
int Nt,
int* pC,
bool* pA,
float* pW,
float* pGS,
int* pCols,
int* pRowIndices,
float* pCond
)
{
int t = threadIdx.x + blockIdx.x*blockDim.x;
int c_t = blockIdx.y;
if (t < Nt)
{
pCond[c_t*Nt+t] = 0;
// Iterate over potential spike parents
for (int row=pCols[t]; row<pCols[t+1]; row++)
{
int n = pRowIndices[row];
int c_n = pC[n];
if (pA[c_n*K+c_t])
{
pCond[c_t*Nt+t] += pW[c_n*K+c_t] * pGS[row];
}
}
}
}
|
fefdbacada91a746445456dcfc0164d16ddd2c99.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef IC_mod
#define IC_mod
#pragma once
// G.K.H. Lee - PA Noti
// translation from from Fortran to C++ by PA Noti - Feb 2021
// last changes in Fortran by GKH-Lee - Oct 2020
#pragma once
#include <hip/hip_runtime.h>
#include <string>
#include <iostream>
//#include "FMS_RC_para_and_const.h"
//#include "k_Rosseland_mod.h"
//#include <math.h>
// Calculates the IR band Rosseland mean opacity (local T) according to the
// Freedman et al. (2014) fit and coefficents
// Calculates the IR band Rosseland mean opacity at the local gas state
// using the Freedman et al. (2014) fit and coefficients.
//
// Input:
//   Tin - local gas temperature [K]
//   Pin - local gas pressure [pa]
//   met - local metallicity [M/H] (log10 relative to solar, solar = 0.0)
// Output (by reference):
//   k_IR - IR band Rosseland mean opacity [m2 kg-1]
void k_Ross_Freedman(double Tin, double Pin, double met, double& k_IR) {
const double pi = atan((double)(1)) * 4;
const double onedivpi = 1.0 / pi;
// Freedman et al. (2014) low-pressure fit coefficients
const double c1 = 10.602;
const double c2 = 2.882;
const double c3 = 6.09e-15;
const double c4 = 2.954;
const double c5 = -2.526;
const double c6 = 0.843;
const double c7 = -5.490;
// the fit works in log10 space; pressure is converted to dyne cm-2 (CGS)
double Tl10 = log10(Tin);
double Pl10 = log10(Pin * ((double)10.0));
// low-pressure branch of the fit, then de-logged
double k_lowP = c1 * atan(Tl10 - c2) -
(c3 / (Pl10 + c4)) * exp(pow((Tl10 - c5), 2.0)) + c6 * met + c7;
k_lowP = pow(((double)10.0), k_lowP);
// high-pressure branch: coefficient set splits at T = 800 K
double k_hiP;
if (Tin <= 800.0)
{
k_hiP = -14.051 + 3.055 * Tl10 + 0.024 * pow(Tl10, 2.0) +
Pl10 * (1.877 + (-0.445) * Tl10) +
0.8321 * met * (0.5 + onedivpi * atan((Tl10 - ((double)2.5)) / (double)0.2));
}
else
{
k_hiP = 82.241 + (-55.456) * Tl10 +
8.754 * pow(Tl10, 2.0) + Pl10 * (0.7048 + (-0.0414) * Tl10) +
0.8321 * met * (0.5 + onedivpi * atan((Tl10 - ((double)2.5)) / (double)0.2));
}
k_hiP = pow(((double)10.0), k_hiP);
// total Rosseland mean opacity - converted to m2 kg-1
k_IR = (k_lowP + k_hiP) / ((double)10.0);
// avoid divergence in the fit for large values
if (k_IR > 1.0e10)
{
k_IR = 1.0e10;
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Calculates 3 band grey visual gamma values and 2 picket fence IR gamma values
// according to the coefficents and equations in:
// Parmentier & Menou (2014) and Parmentier et al. (2015)
// NOTE: This does not calculate the opacity - call k_Ross_Freedman for that
void gam_Parmentier(double Teff, int table_num, double(&gam_V)[3], double(&Beta_V)[3],
double(&Beta)[2], double& gam_1, double& gam_2, double& gam_P, double& tau_lim) {
// dependencies
//// pow from math
//// log10 from math
// Input:
// Teff - Effective temperature [K] (See Parmentier papers for various ways to calculate this)
// for non-irradiated atmosphere Teff = Tint
// table_num - Table selection from Parmentier et al. (2015): 1 = w. TiO/VO, 2 = w.o. TiO/VO
// Call by reference (Input&Output):
// gam_V(3) - gamma ratio for 3 visual bands (gam_V = kV_Ross/kIR_Ross)
// beta_V(3) - fraction of total incident stellar flux in band (1/3 for Parmentier values)
// Beta - equivalent bandwidth for picket fence IR model
// gam_1 - gamma ratio for IR band 1 (gam_1 = kIR_1/kIR_Ross)
// gam_2 - gamma ratio for IR band 2 (gam_2 = kIR_2/kIR_Ross)
// gam_P - gamma ratio for Planck mean (gam_P = kIR_Planck/kIR_Ross)
// tau_lim - tau limit variable (usually for IC system)
// work variables
double R = 0;
double aP = 0;
double bP = 0;
double cP = 0;
double aV1 = 0, bV1 = 0, aV2 = 0, bV2 = 0, aV3 = 0, bV3 = 0;
double aB = 0, bB = 0;
double l10T = 0, l10T2 = 0, RT = 0;
int i;
// start operations
// Log 10 T_eff variables
l10T = log10(Teff);
l10T2 = pow(l10T, 2.0);
// Coefficients are piecewise in effective temperature; each branch below
// selects one regime's fit constants.
if (table_num == 1) {
// First table in Parmentier et al. (2015) w. TiO/VO
// Start large if statements with visual band and Beta coefficients
if (Teff <= 200.0)
{
aV1 = -5.51; bV1 = 2.48;
aV2 = -7.37; bV2 = 2.53;
aV3 = -3.03; bV3 = -0.20;
aB = 0.84; bB = 0.0;
}
else if (Teff > 200.0 && Teff <= 300.0)
{
aV1 = 1.23; bV1 = -0.45;
aV2 = 13.99; bV2 = -6.75;
aV3 = -13.87; bV3 = 4.51;
aB = 0.84; bB = 0.0;
}
else if (Teff > 300.0 && Teff <= 600.0)
{
aV1 = 8.65; bV1 = -3.45;
aV2 = -15.18; bV2 = 5.02;
aV3 = -11.95; bV3 = 3.74;
aB = 0.84; bB = 0.0;
}
else if (Teff > 600.0 && Teff <= 1400.0)
{
aV1 = -12.96; bV1 = 4.33;
aV2 = -10.41; bV2 = 3.31;
aV3 = -6.97; bV3 = 1.94;
aB = 0.84; bB = 0.0;
}
else if (Teff > 1400.0 && Teff < 2000.0)
{
aV1 = -23.75; bV1 = 7.76;
aV2 = -19.95; bV2 = 6.34;
aV3 = -3.65; bV3 = 0.89;
aB = 0.84; bB = 0.0;
}
else if (Teff >= 2000.0)
{
aV1 = 12.65; bV1 = -3.27;
aV2 = 13.56; bV2 = -3.81;
aV3 = -6.02; bV3 = 1.61;
aB = 6.21; bB = -1.63;
}
// gam_P coefficients
aP = -2.36;
bP = 13.92;
cP = -19.38;
}
else if (table_num == 2)
{
// ! Appendix table from Parmentier et al. (2015) - without TiO and VO
if (Teff <= 200.0)
{
aV1 = -5.51; bV1 = 2.48;
aV2 = -7.37; bV2 = 2.53;
aV3 = -3.03; bV3 = -0.20;
aB = 0.84; bB = 0.0;
}
else if (Teff > 200.0 && Teff <= 300.0)
{
aV1 = 1.23; bV1 = -0.45;
aV2 = 13.99; bV2 = -6.75;
aV3 = -13.87; bV3 = 4.51;
aB = 0.84; bB = 0.0;
}
else if (Teff > 300.0 && Teff <= 600.0)
{
aV1 = 8.65; bV1 = -3.45;
aV2 = -15.18; bV2 = 5.02;
aV3 = -11.95; bV3 = 3.74;
aB = 0.84; bB = 0.0;
}
else if (Teff > 600.0 && Teff <= 1400.0)
{
aV1 = -12.96; bV1 = 4.33;
aV2 = -10.41; bV2 = 3.31;
aV3 = -6.97; bV3 = 1.94;
aB = 0.84; bB = 0.0;
}
else if (Teff > 1400.0 && Teff < 2000.0)
{
aV1 = -1.68; bV1 = 0.75;
aV2 = 6.96; bV2 = -2.21;
aV3 = 0.02; bV3 = -0.28;
aB = 3.0; bB = -0.69;
}
else if (Teff >= 2000.0)
{
aV1 = 10.37; bV1 = -2.91;
aV2 = -2.4; bV2 = 0.62;
aV3 = -16.54; bV3 = 4.74;
aB = 3.0; bB = -0.69;
}
// gam_P coefficients
if (Teff <= 1400.0)
{
aP = -2.36;
bP = 13.92;
cP = -19.38;
}
else
{
aP = -12.45;
bP = 82.25;
cP = -134.42;
}
}
// NOTE(review): table_num outside {1,2} leaves all coefficients at their
// zero initializers — confirm callers only pass 1 or 2.
// Calculation of all values
// Visual band gamma
gam_V[0] = pow(((double)10.0), (aV1 + bV1 * l10T));
gam_V[1] = pow(((double)10.0), (aV2 + bV2 * l10T));
gam_V[2] = pow(((double)10.0), (aV3 + bV3 * l10T));
// Visual band fractions
for (i = 0; i < 3; i++)
{
Beta_V[i] = ((double)1.0) / ((double)3.0);
}
// gamma_Planck - if < 1 then make it grey approximation (k_Planck = k_Ross, gam_P = 1)
gam_P = pow(((double)10.0), (aP * l10T2 + bP * l10T + cP));
if (gam_P < 1.0000001)
{
gam_P = 1.0000001;
}
// equivalent bandwidth value
Beta[0] = aB + bB * l10T;
Beta[1] = (1.0) - Beta[0];
// IR band kappa1/kappa2 ratio - Eq. 96 from Parmentier & Menou (2014)
RT = (gam_P - 1.0) / (2.0 * Beta[0] * Beta[1]);
R = 1.0 + RT + sqrt(pow(RT, 2.0) + RT);
// gam_1 and gam_2 values - Eq. 92, 93 from Parmentier & Menou (2014)
gam_1 = Beta[0] + R - Beta[0] * R;
gam_2 = gam_1 / R;
// Calculate tau_lim parameter
tau_lim = ((double)1.0) / (gam_1 * gam_2) * sqrt(gam_P / ((double)3.0));
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Calculates the Bond Albedo according to Parmentier et al. (2015) expression
void Bond_Parmentier(double Teff0, double grav, double& AB) {
  // Bond albedo fit from Parmentier et al. (2015).
  // Input:
  //  Teff0 - Atmospheric profile effective temperature [K] with zero albedo
  //  grav  - Surface gravity of planet [m s-2]
  // Call by reference (Input&Output):
  //  AB - Bond albedo
  // Fit coefficients, initialised so they are never read uninitialised
  // (avoids undefined behaviour if Teff0 is NaN and no branch below is taken).
  double a = 0.0, b = 0.0;
  // Piecewise fit in effective temperature.
  if (Teff0 <= 250.0)
  {
    a = ((double)-0.335) * pow(grav, ((double)0.070));
    b = 0.0;
  }
  else if (Teff0 > 250.0 && Teff0 <= 750.0)
  {
    a = -0.335 * pow(grav, ((double)0.070)) + 2.149 * pow(grav, ((double)0.135));
    b = -0.896 * pow(grav, ((double)0.135));
  }
  else if (Teff0 > 750.0 && Teff0 < 1250.0)
  {
    a = -0.335 * pow(grav, ((double)0.070)) - 0.428 * pow(grav, ((double)0.135));
    b = 0.0;
  }
  else if (Teff0 >= 1250.0)
  {
    a = 16.947 - ((double)3.174) * pow(grav, ((double)0.070)) - 4.051 *
        pow(grav, ((double)0.135));
    b = -5.472 + ((double)0.917) * pow(grav, ((double)0.070)) + 1.170 *
        pow(grav, ((double)0.135));
  }
  // Final Bond Albedo expression
  AB = pow(((double)10.0), (a + b * log10(Teff0)));
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// This subroutine follows Parmentier & Guillot (2014, 2015) non-grey picket fence scheme
void Parmentier_IC(const int nlay, double* pl,
    double* pe, double Tint, double mu, double Tirr,
    double grav, double* (&Tl), int table_num, double met,
    double* tau, double* kRoss) {
  // Analytic picket-fence T(tau) initial condition following
  // Parmentier & Guillot (2014, 2015).
  // Input:
  //  nlay      - number of layers
  //  pl        - layer pressures [pa]
  //  pe        - level pressures [pa] (only read by the commented-out
  //              Valencia opacity alternative)
  //  Tint      - internal temperature [K]
  //  mu        - cosine of the stellar zenith angle
  //  Tirr      - irradiation temperature [K]
  //  grav      - surface gravity [m s-2]
  //  table_num - Parmentier et al. (2015) coefficient table selector (1 or 2)
  //  met       - metallicity [M/H] (log10 from solar)
  // Call by reference (Input & Output):
  //  Tl    - layer temperatures [K]
  //  tau   - layer IR optical depths
  //  kRoss - layer Rosseland mean opacities [m2 kg-1]
  // work variables
  int i, j, k;
  double Teff0, Teff, Tmu, Bond, Tskin;
  double gam_V[3] = { 0 }, Beta_V[3] = { 0 };
  double Beta[2];
  double gam_1, gam_2, gam_P, tau_lim;
  double a0, a1, b0, A, B, At1, At2;
  double a2[3], a3[3], b1[3], b2[3], b3[3], Av1[3], Av2[3];
  double C[3], D[3], E[3];
  double summy;
  // start operations
  // Effective temperature parameter
  Tmu = pow((mu * pow(Tirr, 4.0)), (1.0 / 4.0));
  // Find Bond albedo of planet - Bond albedo is given by mu = 1/sqrt(3)
  Teff0 = pow(((pow(Tint, 4.0) + (1.0 / sqrt(((double)3.0))) * pow(Tirr, 4.0))), (1.0 / 4.0));
  Bond_Parmentier(Teff0, grav, Bond);
  Teff = pow((pow(Tint, 4.0) + (((double)1.0) - Bond) * mu * pow(Tirr, 4.0)), (1.0 / 4.0));
  // Find the V band gamma, beta and IR gamma and beta ratios for this profile
  // Passed mu, so make lat = acos(mu) and lon = 0
  gam_Parmentier(Teff, table_num, gam_V, Beta_V, Beta, gam_1, gam_2,
      gam_P, tau_lim);
  for (i = 0; i < 3; i++)
  {
    gam_V[i] = gam_V[i] / mu;
  }
  // Hard work starts here - first calculate all the required coefficents
  At1 = pow(gam_1, 2.0) * log(1.0 + 1.0 / (tau_lim * gam_1));
  At2 = pow(gam_2, 2.0) * log(1.0 + 1.0 / (tau_lim * gam_2));
  for (i = 0; i < 3; i++)
  {
    Av1[i] = pow(gam_1, 2.0) * log(1.0 + gam_V[i] / gam_1);
    Av2[i] = pow(gam_2, 2.0) * log(1.0 + gam_V[i] / gam_2);
  }
  a0 = 1.0 / gam_1 + 1.0 / gam_2;
  a1 = -1.0 / (((double)3.0) * pow(tau_lim, 2.0)) * (gam_P / (1.0 - gam_P) *
      (gam_1 + gam_2 - 2.0) / (gam_1 + gam_2) +
      (gam_1 + gam_2) * tau_lim - (At1 + At2) * pow(tau_lim, 2.0));
  for (i = 0; i < 3; i++)
  {
    a2[i] = pow(tau_lim, 2.0) / (gam_P * pow(gam_V[i], 2.0)) *
        ((3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) * (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)) *
        (gam_1 + gam_2) - 3.0 * gam_V[i] * (6.0 * pow(gam_1, 2.0) * pow(gam_2, 2.0) - pow(gam_V[i], 2.0) *
        (pow(gam_1, 2.0) + pow(gam_2, 2.0)))) / (1.0 - pow(gam_V[i], 2.0) * pow(tau_lim, 2.0));
    a3[i] = -pow(tau_lim, 2.0) * (3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) *
        (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)) * (Av2[i] + Av1[i]) /
        (gam_P * pow(gam_V[i], 3.0) * (1.0 - pow(gam_V[i], 2.0) * pow(tau_lim, 2.0)));
    b1[i] = gam_1 * gam_2 * (3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) * (3.0 * pow(gam_2, 2.0) -
        pow(gam_V[i], 2.0)) * pow(tau_lim, 2) / (gam_P * pow(gam_V[i], 2.0) *
        (pow(gam_V[i], 2.0) * pow(tau_lim, 2.0) - 1.0));
    b2[i] = 3.0 * (gam_1 + gam_2) * pow(gam_V[i], 3.0) / ((3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) *
        (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)));
    b3[i] = (Av2[i] - Av1[i]) / (gam_V[i] * (gam_1 - gam_2));
  }
  b0 = 1.0 / (gam_1 * gam_2 / (gam_1 - gam_2) * (At1 - At2) / 3.0 - pow((gam_1 * gam_2), 2.0) /
      sqrt(3.0 * gam_P) - pow((gam_1 * gam_2), 3.0) /
      ((1.0 - gam_1) * (1.0 - gam_2) * (gam_1 + gam_2)));
  A = 1.0 / ((double)3.0) * (a0 + a1 * b0);
  B = -1.0 / ((double)3.0) * pow((gam_1 * gam_2), 2.0) / gam_P * b0;
  for (i = 0; i < 3; i++)
  {
    C[i] = -1.0 / ((double)3.0) * (b0 * b1[i] * (1.0 + b2[i] + b3[i]) * a1 + a2[i] + a3[i]);
    D[i] = 1.0 / ((double)3.0) * pow((gam_1 * gam_2), 2.0) / gam_P * b0 * b1[i] * (1.0 + b2[i] + b3[i]);
    E[i] = (3.0 - pow((gam_V[i] / gam_1), 2.0)) * (3.0 - pow((gam_V[i] / gam_2), 2.0)) /
        (9.0 * gam_V[i] * (pow((gam_V[i] * tau_lim), 2.0) - 1.0));
  }
  // T-p structure calculation - we follow exactly V. Parmentier's method
  // Estimate the skin temperature by setting tau = 0
  tau[0] = 0.0;
  summy = 0.0;
  for (i = 0; i < 3; i++)
  {
    summy += 3.0 * Beta_V[i] * pow(Tmu, 4.0) / 4.0 * (C[i] + D[i] * exp(-tau[0] / tau_lim) +
        E[i] * exp(-gam_V[i] * tau[0]));
  }
  Tskin = 3.0 * pow(Tint, 4) / 4.0 * (tau[0] + A + B * exp(-tau[0] / tau_lim)) + summy;
  Tskin = pow(Tskin, (1.0 / 4.0));
  // Estimate the opacity at TOA from the skin temperature - assume this is = first layer opacity
  k_Ross_Freedman(Tskin, pl[0], met, kRoss[0]);
  // k_Rosseland_mod::k_Ross_Valencia(Tskin, pe[0], met, kRoss[0]);
  // Recalculate the upmost tau with new kappa
  tau[0] = kRoss[0] / grav * pl[0];
  // More accurate layer T at uppermost layer
  summy = 0.0;
  for (i = 0; i < 3; i++)
  {
    summy += 3.0 * Beta_V[i] * pow(Tmu, 4.0) / 4.0 * (C[i] + D[i] * exp(-tau[0] / tau_lim) +
        E[i] * exp(-gam_V[i] * tau[0]));
  }
  Tl[0] = 3.0 * pow(Tint, 4) / 4.0 * (tau[0] + A + B * exp(-tau[0] / tau_lim)) + summy;
  Tl[0] = pow(Tl[0], (1.0 / 4.0));
  // Now we can loop in optical depth space to find the T-p profile
  for (i = 1; i < nlay; i++)
  {
    // Initial guess for layer
    k_Ross_Freedman(Tl[i - 1], sqrt(pl[i - 1] * pl[i]), met, kRoss[i]);
    // call k_Rosseland_mod::k_Ross_Valencia(Tl[i-1], sqrt(pl[i-1]*pl[i], met, kRoss[i])
    tau[i] = tau[i - 1] + kRoss[i] / grav * (pl[i] - pl[i - 1]);
    summy = 0.0;
    for (j = 0; j < 3; j++)
    {
      // BUGFIX: accumulate over all three visual bands ("+=", previously
      // "= +", which discarded bands 0 and 1 and skewed the initial guess;
      // the three sibling loops in this routine all use "+=").
      summy += 3.0 * Beta_V[j] * pow(Tmu, 4.0) / 4.0 * (C[j] + D[j] * exp(-tau[i] / tau_lim) +
          E[j] * exp(-gam_V[j] * tau[i]));
    }
    Tl[i] = 3.0 * pow(Tint, 4.0) / 4.0 * (tau[i] + A + B * exp(-tau[i] / tau_lim)) + summy;
    Tl[i] = pow(Tl[i], (1.0 / 4.0));
    // Convergence loop
    for (j = 0; j < 5; j++)
    {
      k_Ross_Freedman(sqrt(Tl[i - 1] * Tl[i]), sqrt(pl[i - 1] * pl[i]), met, kRoss[i]);
      //call k_Rosseland_mod::k_Ross_Valencia(sqrt(Tl[i-1]*T[i]), sqrt(pl[i-1]*pl[i]), met, kRoss[i])
      tau[i] = tau[i - 1] + kRoss[i] / grav * (pl[i] - pl[i - 1]);
      summy = 0.0;
      for (k = 0; k < 3; k++)
      {
        summy += 3.0 * Beta_V[k] * pow(Tmu, 4.0) / 4.0 * (C[k] + D[k] * exp(-tau[i] / tau_lim) +
            E[k] * exp(-gam_V[k] * tau[i]));
      }
      Tl[i] = 3.0 * pow(Tint, 4.0) / 4.0 * (tau[i] + A + B * exp(-tau[i] / tau_lim)) + summy;
      Tl[i] = pow(Tl[i], (1.0 / 4.0));
    }
  }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Subroutine that corrects for adiabatic region following Parmentier & Guillot (2015)
void adiabat_correction(int nlay, double* (&Tl),
    double* pl, double& prc, double* gradrad, double* gradad) {
  // Corrects the T-p profile for an adiabatic region following
  // Parmentier & Guillot (2015): locate the radiative-convective boundary
  // from the bottom up, then force the profile below it onto the adiabat.
  // Call by reference (Input & Output):
  //  Tl      - layer temperatures [K] (overwritten below the boundary)
  //  prc     - boundary pressure (set only when the 98% criterion triggers)
  //  gradrad - radiative gradient d(logT)/d(logP) per layer
  //  gradad  - adiabatic gradient per layer
  // Per-layer radiative lapse rate and adiabatic gradient.
  for (int lev = 0; lev < nlay - 1; lev++) {
    const double dlogT = log10(Tl[lev]) - log10(Tl[lev + 1]);
    const double dlogP = log10(pl[lev]) - log10(pl[lev + 1]);
    gradrad[lev] = dlogT / dlogP;
    gradad[lev] = 0.32 - 0.10 * Tl[lev] / 3000.0;
  }
  gradrad[nlay - 1] = 0.0;
  gradad[nlay - 1] = 0.0;
  // Sweep from the bottom layer upward looking for where the radiative
  // gradient exceeds the adiabatic one. The looser 70% criterion gates the
  // search so it stops once the chain of candidate layers is broken.
  int conv_top = nlay - 2;
  int conv_top_lo = nlay - 2;
  for (int lev = nlay - 2; lev >= 0; lev--) {
    if (conv_top_lo > lev + 1) {
      continue;  // chain broken: no further updates
    }
    if (gradrad[lev] > 0.7 * gradad[lev]) {
      conv_top_lo = lev;
    }
    if (gradrad[lev] > 0.98 * gradad[lev]) {
      conv_top = lev;
      prc = pl[conv_top];
    }
  }
  // Rebuild the profile below the boundary along the (clamped) adiabat.
  if (conv_top < nlay) {
    for (int lev = conv_top; lev < nlay - 1; lev++) {
      gradad[lev] = 0.32 - 0.10 * Tl[lev] / 3000.0;
      if (gradad[lev] < 0.0) {
        gradad[lev] = 0.0;
      }
      Tl[lev + 1] = Tl[lev] * pow(pl[lev + 1] / pl[lev], gradad[lev]);
    }
  }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
void IC_profile(int iIC, bool corr, int nlay, double p0,
    double* pl, double* pe,
    double* k_V, double* k_IR,
    double Tint, double mu, double Tirr, double grav, double fl,
    double* Tl, double& prc, int table_num, double met,
    double* tau_hf_e, double* kRoss_hf_e, double* tau_IRl_hf_l,
    double* gradrad_hf_l, double* gradad_hf_l) {
  // Dispatches to the requested initial-condition scheme. Only iIC == 4
  // (Parmentier non-grey picket-fence IC) is implemented; any other value
  // leaves Tl untouched.
  if (iIC == 4) {
    Parmentier_IC(nlay, pl, pe, Tint, mu, Tirr, grav, Tl, table_num, met,
        tau_hf_e, kRoss_hf_e);
  }
  // Optionally apply the adiabatic correction below the RC boundary;
  // otherwise report the reference pressure p0 as the RC pressure.
  if (corr) {
    adiabat_correction(nlay, Tl, pl, prc, gradrad_hf_l, gradad_hf_l);
  } else {
    prc = p0;
  }
}
#endif // IC_mod | fefdbacada91a746445456dcfc0164d16ddd2c99.cu | #ifndef IC_mod
#define IC_mod
#pragma once
// G.K.H. Lee - PA Noti
// translation from from Fortran to C++ by PA Noti - Feb 2021
// last changes in Fortran by GKH-Lee - Oct 2020
#pragma once
#include <cuda_runtime.h>
#include <string>
#include <iostream>
//#include "FMS_RC_para_and_const.h"
//#include "k_Rosseland_mod.h"
//#include <math.h>
// Calculates the IR band Rosseland mean opacity (local T) according to the
// Freedman et al. (2014) fit and coefficents
void k_Ross_Freedman(double Tin, double Pin, double met, double& k_IR) {
  // IR-band Rosseland mean opacity from the Freedman et al. (2014) table fit.
  // Input:
  //  Tin - Local gas temperature [K]
  //  Pin - Local gas pressure [pa]
  //  met - Local metallicity [M/H] (log10 from solar, solar [M/H] = 0.0)
  // Call by reference (Input&Output):
  //  k_IR - IR band Rosseland mean opacity [m2 kg-1]
  const double pi = atan((double)(1)) * 4;
  const double onedivpi = 1.0 / pi;
  // Fit coefficients from Freedman et al. (2014); the _l/_h pairs are the
  // low-/high-temperature sets, split at T = 800 K below.
  const double c1 = 10.602;
  const double c2 = 2.882;
  const double c3 = 6.09e-15;
  const double c4 = 2.954;
  const double c5 = -2.526;
  const double c6 = 0.843;
  const double c7 = -5.490;
  const double c8_l = -14.051, c8_h = 82.241;
  const double c9_l = 3.055, c9_h = -55.456;
  const double c10_l = 0.024, c10_h = 8.754;
  const double c11_l = 1.877, c11_h = 0.7048;
  const double c12_l = -0.445, c12_h = -0.0414;
  const double c13_l = 0.8321, c13_h = 0.8321;
  // Work in cgs: convert pressure from Pa to dyne cm-2 (factor 10).
  const double T = Tin;
  const double P = Pin * 10.0;
  const double Tl10 = log10(T);
  const double Pl10 = log10(P);
  // Low-pressure expression (in log10), then de-log.
  double k_lowP = c1 * atan(Tl10 - c2) -
      (c3 / (Pl10 + c4)) * exp(pow((Tl10 - c5), 2.0)) + c6 * met + c7;
  k_lowP = pow(10.0, k_lowP);
  // High-pressure expression; coefficient set split at T = 800 K.
  double k_hiP;
  if (T <= 800.0)
  {
    k_hiP = c8_l + c9_l * Tl10 + c10_l * pow(Tl10, 2.0) +
        Pl10 * (c11_l + c12_l * Tl10) +
        c13_l * met * (0.5 + onedivpi * atan((Tl10 - 2.5) / 0.2));
  }
  else
  {
    k_hiP = c8_h + c9_h * Tl10 +
        c10_h * pow(Tl10, 2.0) + Pl10 * (c11_h + c12_h * Tl10) +
        c13_h * met * (0.5 + onedivpi * atan((Tl10 - 2.5) / 0.2));
  }
  k_hiP = pow(10.0, k_hiP);
  // Total Rosseland mean opacity - converted to m2 kg-1.
  k_IR = (k_lowP + k_hiP) / 10.0;
  // Avoid divergence in the fit for large values.
  if (k_IR > 1.0e10)
  {
    k_IR = 1.0e10;
  }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Calculates 3 band grey visual gamma values and 2 picket fence IR gamma values
// according to the coefficents and equations in:
// Parmentier & Menou (2014) and Parmentier et al. (2015)
// NOTE: This does not calculate the opacity - call k_Ross_Freedman for that
void gam_Parmentier(double Teff, int table_num, double(&gam_V)[3], double(&Beta_V)[3],
double(&Beta)[2], double& gam_1, double& gam_2, double& gam_P, double& tau_lim) {
// dependcies
//// pow from math
//// log10 from math
// Input:
// Teff - Effective temperature [K] (See Parmentier papers for various ways to calculate this)
// for non-irradiated atmosphere Teff = Tint
// table_num - Table selection from Parmentier et al. (2015): 1 = w. TiO/VO, 2 = w.o. TiO/VO
// Call by reference (Input&Output):
// gam_V(3) - gamma ratio for 3 visual bands (gam_V = kV_Ross/kIR_Ross)
// beta_V(3) - fraction of total incident stellar flux in band (1/3 for Parmentier values)
// Beta - equilvalent bandwidth for picket fence IR model
// gam_1 - gamma ratio for IR band 1 (gam_1 = kIR_1/kIR_Ross)
// gam_2 - gamma ratio for IR band 2 (gam_2 = kIR_2/kIR_Ross)
// gam_P - gamma ratio for Planck mean (gam_P = kIR_Planck/kIR_Ross)
// tau_lim - tau limit variable (usually for IC system)
// work variables
// NOTE: the fit coefficients below default to zero, so an unrecognised
// table_num falls through to gam_V = {1,1,1}, Beta = {0,1} and gam_P
// clamped to ~1 (the grey limit).
double R = 0;
double aP = 0;
double bP = 0;
double cP = 0;
double aV1 = 0, bV1 = 0, aV2 = 0, bV2 = 0, aV3 = 0, bV3 = 0;
double aB = 0, bB = 0;
double l10T = 0, l10T2 = 0, RT = 0;
int i;
// start operations
// Log 10 T_eff variables
l10T = log10(Teff);
l10T2 = pow(l10T, 2.0);
if (table_num == 1) {
// First table in Parmentier et al. (2015) w. TiO/VO
// Start large if statements with visual band and Beta coefficents
if (Teff <= 200.0)
{
aV1 = -5.51; bV1 = 2.48;
aV2 = -7.37; bV2 = 2.53;
aV3 = -3.03; bV3 = -0.20;
aB = 0.84; bB = 0.0;
}
else if (Teff > 200.0 && Teff <= 300.0)
{
aV1 = 1.23; bV1 = -0.45;
aV2 = 13.99; bV2 = -6.75;
aV3 = -13.87; bV3 = 4.51;
aB = 0.84; bB = 0.0;
}
else if (Teff > 300.0 && Teff <= 600.0)
{
aV1 = 8.65; bV1 = -3.45;
aV2 = -15.18; bV2 = 5.02;
aV3 = -11.95; bV3 = 3.74;
aB = 0.84; bB = 0.0;
}
else if (Teff > 600.0 && Teff <= 1400.0)
{
aV1 = -12.96; bV1 = 4.33;
aV2 = -10.41; bV2 = 3.31;
aV3 = -6.97; bV3 = 1.94;
aB = 0.84; bB = 0.0;
}
else if (Teff > 1400.0 && Teff < 2000.0)
{
aV1 = -23.75; bV1 = 7.76;
aV2 = -19.95; bV2 = 6.34;
aV3 = -3.65; bV3 = 0.89;
aB = 0.84; bB = 0.0;
}
else if (Teff >= 2000.0)
{
aV1 = 12.65; bV1 = -3.27;
aV2 = 13.56; bV2 = -3.81;
aV3 = -6.02; bV3 = 1.61;
aB = 6.21; bB = -1.63;
}
// gam_P coefficents
aP = -2.36;
bP = 13.92;
cP = -19.38;
}
else if (table_num == 2)
{
// ! Appendix table from Parmentier et al. (2015) - without TiO and VO
// (identical to table 1 below 1400 K; only the hot branches differ)
if (Teff <= 200.0)
{
aV1 = -5.51; bV1 = 2.48;
aV2 = -7.37; bV2 = 2.53;
aV3 = -3.03; bV3 = -0.20;
aB = 0.84; bB = 0.0;
}
else if (Teff > 200.0 && Teff <= 300.0)
{
aV1 = 1.23; bV1 = -0.45;
aV2 = 13.99; bV2 = -6.75;
aV3 = -13.87; bV3 = 4.51;
aB = 0.84; bB = 0.0;
}
else if (Teff > 300.0 && Teff <= 600.0)
{
aV1 = 8.65; bV1 = -3.45;
aV2 = -15.18; bV2 = 5.02;
aV3 = -11.95; bV3 = 3.74;
aB = 0.84; bB = 0.0;
}
else if (Teff > 600.0 && Teff <= 1400.0)
{
aV1 = -12.96; bV1 = 4.33;
aV2 = -10.41; bV2 = 3.31;
aV3 = -6.97; bV3 = 1.94;
aB = 0.84; bB = 0.0;
}
else if (Teff > 1400.0 && Teff < 2000.0)
{
aV1 = -1.68; bV1 = 0.75;
aV2 = 6.96; bV2 = -2.21;
aV3 = 0.02; bV3 = -0.28;
aB = 3.0; bB = -0.69;
}
else if (Teff >= 2000.0)
{
aV1 = 10.37; bV1 = -2.91;
aV2 = -2.4; bV2 = 0.62;
aV3 = -16.54; bV3 = 4.74;
aB = 3.0; bB = -0.69;
}
// gam_P coefficents
if (Teff <= 1400.0)
{
aP = -2.36;
bP = 13.92;
cP = -19.38;
}
else
{
aP = -12.45;
bP = 82.25;
cP = -134.42;
}
}
// Calculation of all values
// Visual band gamma
gam_V[0] = pow(((double)10.0), (aV1 + bV1 * l10T));
gam_V[1] = pow(((double)10.0), (aV2 + bV2 * l10T));
gam_V[2] = pow(((double)10.0), (aV3 + bV3 * l10T));
// Visual band fractions
for (i = 0; i < 3; i++)
{
Beta_V[i] = ((double)1.0) / ((double)3.0);
}
// gamma_Planck - if < 1 then make it grey approximation (k_Planck = k_Ross, gam_P = 1)
gam_P = pow(((double)10.0), (aP * l10T2 + bP * l10T + cP));
if (gam_P < 1.0000001)
{
gam_P = 1.0000001;
}
// equivalent bandwidth value
Beta[0] = aB + bB * l10T;
Beta[1] = (1.0) - Beta[0];
// IR band kappa1/kappa2 ratio - Eq. 96 from Parmentier & Menou (2014)
// NOTE(review): Beta[0] == 0 (only reachable for an invalid table_num)
// would divide by zero here -- confirm callers always pass 1 or 2.
RT = (gam_P - 1.0) / (2.0 * Beta[0] * Beta[1]);
R = 1.0 + RT + sqrt(pow(RT, 2.0) + RT);
// gam_1 and gam_2 values - Eq. 92, 93 from Parmentier & Menou (2014)
gam_1 = Beta[0] + R - Beta[0] * R;
gam_2 = gam_1 / R;
// Calculate tau_lim parameter
tau_lim = ((double)1.0) / (gam_1 * gam_2) * sqrt(gam_P / ((double)3.0));
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Calculates the Bond Albedo according to Parmentier et al. (2015) expression
void Bond_Parmentier(double Teff0, double grav, double& AB) {
  // Bond albedo fit from Parmentier et al. (2015).
  // Input:
  //  Teff0 - Atmospheric profile effective temperature [K] with zero albedo
  //  grav  - Surface gravity of planet [m s-2]
  // Call by reference (Input&Output):
  //  AB - Bond albedo
  // Fit coefficients, initialised so they are never read uninitialised
  // (avoids undefined behaviour if Teff0 is NaN and no branch below is taken).
  double a = 0.0, b = 0.0;
  // Piecewise fit in effective temperature.
  if (Teff0 <= 250.0)
  {
    a = ((double)-0.335) * pow(grav, ((double)0.070));
    b = 0.0;
  }
  else if (Teff0 > 250.0 && Teff0 <= 750.0)
  {
    a = -0.335 * pow(grav, ((double)0.070)) + 2.149 * pow(grav, ((double)0.135));
    b = -0.896 * pow(grav, ((double)0.135));
  }
  else if (Teff0 > 750.0 && Teff0 < 1250.0)
  {
    a = -0.335 * pow(grav, ((double)0.070)) - 0.428 * pow(grav, ((double)0.135));
    b = 0.0;
  }
  else if (Teff0 >= 1250.0)
  {
    a = 16.947 - ((double)3.174) * pow(grav, ((double)0.070)) - 4.051 *
        pow(grav, ((double)0.135));
    b = -5.472 + ((double)0.917) * pow(grav, ((double)0.070)) + 1.170 *
        pow(grav, ((double)0.135));
  }
  // Final Bond Albedo expression
  AB = pow(((double)10.0), (a + b * log10(Teff0)));
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// This subroutine follows Parmentier & Guillot (2014, 2015) non-grey picket fence scheme
void Parmentier_IC(const int nlay, double* pl,
    double* pe, double Tint, double mu, double Tirr,
    double grav, double* (&Tl), int table_num, double met,
    double* tau, double* kRoss) {
  // Analytic picket-fence T(tau) initial condition following
  // Parmentier & Guillot (2014, 2015).
  // Input:
  //  nlay      - number of layers
  //  pl        - layer pressures [pa]
  //  pe        - level pressures [pa] (only read by the commented-out
  //              Valencia opacity alternative)
  //  Tint      - internal temperature [K]
  //  mu        - cosine of the stellar zenith angle
  //  Tirr      - irradiation temperature [K]
  //  grav      - surface gravity [m s-2]
  //  table_num - Parmentier et al. (2015) coefficient table selector (1 or 2)
  //  met       - metallicity [M/H] (log10 from solar)
  // Call by reference (Input & Output):
  //  Tl    - layer temperatures [K]
  //  tau   - layer IR optical depths
  //  kRoss - layer Rosseland mean opacities [m2 kg-1]
  // work variables
  int i, j, k;
  double Teff0, Teff, Tmu, Bond, Tskin;
  double gam_V[3] = { 0 }, Beta_V[3] = { 0 };
  double Beta[2];
  double gam_1, gam_2, gam_P, tau_lim;
  double a0, a1, b0, A, B, At1, At2;
  double a2[3], a3[3], b1[3], b2[3], b3[3], Av1[3], Av2[3];
  double C[3], D[3], E[3];
  double summy;
  // start operations
  // Effective temperature parameter
  Tmu = pow((mu * pow(Tirr, 4.0)), (1.0 / 4.0));
  // Find Bond albedo of planet - Bond albedo is given by mu = 1/sqrt(3)
  Teff0 = pow(((pow(Tint, 4.0) + (1.0 / sqrt(((double)3.0))) * pow(Tirr, 4.0))), (1.0 / 4.0));
  Bond_Parmentier(Teff0, grav, Bond);
  Teff = pow((pow(Tint, 4.0) + (((double)1.0) - Bond) * mu * pow(Tirr, 4.0)), (1.0 / 4.0));
  // Find the V band gamma, beta and IR gamma and beta ratios for this profile
  // Passed mu, so make lat = acos(mu) and lon = 0
  gam_Parmentier(Teff, table_num, gam_V, Beta_V, Beta, gam_1, gam_2,
      gam_P, tau_lim);
  for (i = 0; i < 3; i++)
  {
    gam_V[i] = gam_V[i] / mu;
  }
  // Hard work starts here - first calculate all the required coefficents
  At1 = pow(gam_1, 2.0) * log(1.0 + 1.0 / (tau_lim * gam_1));
  At2 = pow(gam_2, 2.0) * log(1.0 + 1.0 / (tau_lim * gam_2));
  for (i = 0; i < 3; i++)
  {
    Av1[i] = pow(gam_1, 2.0) * log(1.0 + gam_V[i] / gam_1);
    Av2[i] = pow(gam_2, 2.0) * log(1.0 + gam_V[i] / gam_2);
  }
  a0 = 1.0 / gam_1 + 1.0 / gam_2;
  a1 = -1.0 / (((double)3.0) * pow(tau_lim, 2.0)) * (gam_P / (1.0 - gam_P) *
      (gam_1 + gam_2 - 2.0) / (gam_1 + gam_2) +
      (gam_1 + gam_2) * tau_lim - (At1 + At2) * pow(tau_lim, 2.0));
  for (i = 0; i < 3; i++)
  {
    a2[i] = pow(tau_lim, 2.0) / (gam_P * pow(gam_V[i], 2.0)) *
        ((3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) * (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)) *
        (gam_1 + gam_2) - 3.0 * gam_V[i] * (6.0 * pow(gam_1, 2.0) * pow(gam_2, 2.0) - pow(gam_V[i], 2.0) *
        (pow(gam_1, 2.0) + pow(gam_2, 2.0)))) / (1.0 - pow(gam_V[i], 2.0) * pow(tau_lim, 2.0));
    a3[i] = -pow(tau_lim, 2.0) * (3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) *
        (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)) * (Av2[i] + Av1[i]) /
        (gam_P * pow(gam_V[i], 3.0) * (1.0 - pow(gam_V[i], 2.0) * pow(tau_lim, 2.0)));
    b1[i] = gam_1 * gam_2 * (3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) * (3.0 * pow(gam_2, 2.0) -
        pow(gam_V[i], 2.0)) * pow(tau_lim, 2) / (gam_P * pow(gam_V[i], 2.0) *
        (pow(gam_V[i], 2.0) * pow(tau_lim, 2.0) - 1.0));
    b2[i] = 3.0 * (gam_1 + gam_2) * pow(gam_V[i], 3.0) / ((3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) *
        (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)));
    b3[i] = (Av2[i] - Av1[i]) / (gam_V[i] * (gam_1 - gam_2));
  }
  b0 = 1.0 / (gam_1 * gam_2 / (gam_1 - gam_2) * (At1 - At2) / 3.0 - pow((gam_1 * gam_2), 2.0) /
      sqrt(3.0 * gam_P) - pow((gam_1 * gam_2), 3.0) /
      ((1.0 - gam_1) * (1.0 - gam_2) * (gam_1 + gam_2)));
  A = 1.0 / ((double)3.0) * (a0 + a1 * b0);
  B = -1.0 / ((double)3.0) * pow((gam_1 * gam_2), 2.0) / gam_P * b0;
  for (i = 0; i < 3; i++)
  {
    C[i] = -1.0 / ((double)3.0) * (b0 * b1[i] * (1.0 + b2[i] + b3[i]) * a1 + a2[i] + a3[i]);
    D[i] = 1.0 / ((double)3.0) * pow((gam_1 * gam_2), 2.0) / gam_P * b0 * b1[i] * (1.0 + b2[i] + b3[i]);
    E[i] = (3.0 - pow((gam_V[i] / gam_1), 2.0)) * (3.0 - pow((gam_V[i] / gam_2), 2.0)) /
        (9.0 * gam_V[i] * (pow((gam_V[i] * tau_lim), 2.0) - 1.0));
  }
  // T-p structure calculation - we follow exactly V. Parmentier's method
  // Estimate the skin temperature by setting tau = 0
  tau[0] = 0.0;
  summy = 0.0;
  for (i = 0; i < 3; i++)
  {
    summy += 3.0 * Beta_V[i] * pow(Tmu, 4.0) / 4.0 * (C[i] + D[i] * exp(-tau[0] / tau_lim) +
        E[i] * exp(-gam_V[i] * tau[0]));
  }
  Tskin = 3.0 * pow(Tint, 4) / 4.0 * (tau[0] + A + B * exp(-tau[0] / tau_lim)) + summy;
  Tskin = pow(Tskin, (1.0 / 4.0));
  // Estimate the opacity at TOA from the skin temperature - assume this is = first layer opacity
  k_Ross_Freedman(Tskin, pl[0], met, kRoss[0]);
  // k_Rosseland_mod::k_Ross_Valencia(Tskin, pe[0], met, kRoss[0]);
  // Recalculate the upmost tau with new kappa
  tau[0] = kRoss[0] / grav * pl[0];
  // More accurate layer T at uppermost layer
  summy = 0.0;
  for (i = 0; i < 3; i++)
  {
    summy += 3.0 * Beta_V[i] * pow(Tmu, 4.0) / 4.0 * (C[i] + D[i] * exp(-tau[0] / tau_lim) +
        E[i] * exp(-gam_V[i] * tau[0]));
  }
  Tl[0] = 3.0 * pow(Tint, 4) / 4.0 * (tau[0] + A + B * exp(-tau[0] / tau_lim)) + summy;
  Tl[0] = pow(Tl[0], (1.0 / 4.0));
  // Now we can loop in optical depth space to find the T-p profile
  for (i = 1; i < nlay; i++)
  {
    // Initial guess for layer
    k_Ross_Freedman(Tl[i - 1], sqrt(pl[i - 1] * pl[i]), met, kRoss[i]);
    // call k_Rosseland_mod::k_Ross_Valencia(Tl[i-1], sqrt(pl[i-1]*pl[i], met, kRoss[i])
    tau[i] = tau[i - 1] + kRoss[i] / grav * (pl[i] - pl[i - 1]);
    summy = 0.0;
    for (j = 0; j < 3; j++)
    {
      // BUGFIX: accumulate over all three visual bands ("+=", previously
      // "= +", which discarded bands 0 and 1 and skewed the initial guess;
      // the three sibling loops in this routine all use "+=").
      summy += 3.0 * Beta_V[j] * pow(Tmu, 4.0) / 4.0 * (C[j] + D[j] * exp(-tau[i] / tau_lim) +
          E[j] * exp(-gam_V[j] * tau[i]));
    }
    Tl[i] = 3.0 * pow(Tint, 4.0) / 4.0 * (tau[i] + A + B * exp(-tau[i] / tau_lim)) + summy;
    Tl[i] = pow(Tl[i], (1.0 / 4.0));
    // Convergence loop
    for (j = 0; j < 5; j++)
    {
      k_Ross_Freedman(sqrt(Tl[i - 1] * Tl[i]), sqrt(pl[i - 1] * pl[i]), met, kRoss[i]);
      //call k_Rosseland_mod::k_Ross_Valencia(sqrt(Tl[i-1]*T[i]), sqrt(pl[i-1]*pl[i]), met, kRoss[i])
      tau[i] = tau[i - 1] + kRoss[i] / grav * (pl[i] - pl[i - 1]);
      summy = 0.0;
      for (k = 0; k < 3; k++)
      {
        summy += 3.0 * Beta_V[k] * pow(Tmu, 4.0) / 4.0 * (C[k] + D[k] * exp(-tau[i] / tau_lim) +
            E[k] * exp(-gam_V[k] * tau[i]));
      }
      Tl[i] = 3.0 * pow(Tint, 4.0) / 4.0 * (tau[i] + A + B * exp(-tau[i] / tau_lim)) + summy;
      Tl[i] = pow(Tl[i], (1.0 / 4.0));
    }
  }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Subroutine that corrects for adiabatic region following Parmentier & Guillot (2015)
void adiabat_correction(int nlay, double* (&Tl),
    double* pl, double& prc, double* gradrad, double* gradad) {
  // Corrects the T-p profile for an adiabatic region following
  // Parmentier & Guillot (2015): locate the radiative-convective boundary
  // from the bottom up, then force the profile below it onto the adiabat.
  // Call by reference (Input & Output):
  //  Tl      - layer temperatures [K] (overwritten below the boundary)
  //  prc     - boundary pressure (set only when the 98% criterion triggers)
  //  gradrad - radiative gradient d(logT)/d(logP) per layer
  //  gradad  - adiabatic gradient per layer
  // Per-layer radiative lapse rate and adiabatic gradient.
  for (int lev = 0; lev < nlay - 1; lev++) {
    const double dlogT = log10(Tl[lev]) - log10(Tl[lev + 1]);
    const double dlogP = log10(pl[lev]) - log10(pl[lev + 1]);
    gradrad[lev] = dlogT / dlogP;
    gradad[lev] = 0.32 - 0.10 * Tl[lev] / 3000.0;
  }
  gradrad[nlay - 1] = 0.0;
  gradad[nlay - 1] = 0.0;
  // Sweep from the bottom layer upward looking for where the radiative
  // gradient exceeds the adiabatic one. The looser 70% criterion gates the
  // search so it stops once the chain of candidate layers is broken.
  int conv_top = nlay - 2;
  int conv_top_lo = nlay - 2;
  for (int lev = nlay - 2; lev >= 0; lev--) {
    if (conv_top_lo > lev + 1) {
      continue;  // chain broken: no further updates
    }
    if (gradrad[lev] > 0.7 * gradad[lev]) {
      conv_top_lo = lev;
    }
    if (gradrad[lev] > 0.98 * gradad[lev]) {
      conv_top = lev;
      prc = pl[conv_top];
    }
  }
  // Rebuild the profile below the boundary along the (clamped) adiabat.
  if (conv_top < nlay) {
    for (int lev = conv_top; lev < nlay - 1; lev++) {
      gradad[lev] = 0.32 - 0.10 * Tl[lev] / 3000.0;
      if (gradad[lev] < 0.0) {
        gradad[lev] = 0.0;
      }
      Tl[lev + 1] = Tl[lev] * pow(pl[lev + 1] / pl[lev], gradad[lev]);
    }
  }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
void IC_profile(int iIC, bool corr, int nlay, double p0,
    double* pl, double* pe,
    double* k_V, double* k_IR,
    double Tint, double mu, double Tirr, double grav, double fl,
    double* Tl, double& prc, int table_num, double met,
    double* tau_hf_e, double* kRoss_hf_e, double* tau_IRl_hf_l,
    double* gradrad_hf_l, double* gradad_hf_l) {
  // Dispatches to the requested initial-condition scheme. Only iIC == 4
  // (Parmentier non-grey picket-fence IC) is implemented; any other value
  // leaves Tl untouched.
  if (iIC == 4) {
    Parmentier_IC(nlay, pl, pe, Tint, mu, Tirr, grav, Tl, table_num, met,
        tau_hf_e, kRoss_hf_e);
  }
  // Optionally apply the adiabatic correction below the RC boundary;
  // otherwise report the reference pressure p0 as the RC pressure.
  if (corr) {
    adiabat_correction(nlay, Tl, pl, prc, gradrad_hf_l, gradad_hf_l);
  } else {
    prc = p0;
  }
}
#endif // IC_mod |
e782c331ab8b63c7a9bc4a621c1bd5e7a3d3b9f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Thrust code needs to be compiled with nvcc
#include <memory>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/miopen_common.h"
namespace onnxruntime {
namespace rocm {
// Fills output_data[0..N) with val. Each thread writes up to
// NumElementsPerThread elements strided by blockDim.x inside its block's
// NumElementsPerThread * blockDim.x wide tile; the trailing partial tile is
// handled by the id < N guard. NumThreadsPerBlock is not referenced in the
// body; it is kept for symmetry with the launch site.
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _Fill(
T* output_data,
T val,
HIP_LONG N) {
HIP_LONG id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = val;
// advance to this thread's next element within the block tile
id += blockDim.x;
}
}
}
// Host-side launcher: fills `count` elements of the device buffer `output`
// with `value` on `stream`. Asynchronous with respect to the host.
// NOTE(review): the launch result is not checked (no hipGetLastError), so
// launch-configuration errors would go unnoticed here.
template <typename T>
void Fill(hipStream_t stream, T* output, T value, int64_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
HIP_LONG N = static_cast<HIP_LONG>(count);
hipLaunchKernelGGL(( _Fill<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(dim3(blocksPerGrid)), dim3(dim3(GridDim::maxThreadsPerBlock)), 0, stream, output, value, N);
}
// Lazily-allocated device buffer holding `count` copies of a fixed value.
// The buffer only grows: GetBuffer reallocates and refills when a larger
// count is requested, otherwise it returns the cached allocation.
// NOTE(review): no synchronization around buffer_/count_ -- assumes callers
// do not invoke GetBuffer concurrently; confirm against usage.
template <typename T>
class ConstantBufferImpl : public IConstantBuffer<T> {
public:
ConstantBufferImpl(T val) : buffer_(nullptr), count_(0), val_(val) {}
~ConstantBufferImpl() {
// NOTE(review): HIP_CALL_THROW can throw from this destructor, which
// terminates the program if stack unwinding is already in progress.
if (buffer_)
HIP_CALL_THROW(hipFree(buffer_));
}
virtual const T* GetBuffer(hipStream_t stream, size_t count) {
if (count > count_) {
// Grow: release the old allocation and fill a larger one on `stream`.
if (buffer_) {
HIP_CALL_THROW(hipFree(buffer_));
buffer_ = nullptr;
}
HIP_CALL_THROW(hipMalloc(&buffer_, count * sizeof(T)));
count_ = count;
Fill(stream, buffer_, val_, count);
}
return buffer_;
}
private:
T* buffer_; // device allocation (nullptr until first GetBuffer)
size_t count_; // element capacity of buffer_
T val_; // value every element is filled with
};
// Factory for a lazily-filled device buffer of ones of type T
// (backed by ConstantBufferImpl).
template <typename T>
std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() {
return std::make_unique<ConstantBufferImpl<T>>(Consts<T>::One);
}
// Explicit instantiations for the supported element types.
template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>();
template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>();
template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>();
// Explicit instantiations of Fill for the supported element types.
#define SPECIALIZED_FILL(T) \
template void Fill<T>(hipStream_t stream, T * output, T value, int64_t count);
SPECIALIZED_FILL(int8_t)
SPECIALIZED_FILL(int16_t)
SPECIALIZED_FILL(int32_t)
SPECIALIZED_FILL(int64_t)
SPECIALIZED_FILL(float)
SPECIALIZED_FILL(double)
SPECIALIZED_FILL(__half)
} // namespace rocm
} // namespace onnxruntime
| e782c331ab8b63c7a9bc4a621c1bd5e7a3d3b9f3.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Thrust code needs to be compiled with nvcc
#include <memory>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/miopen_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _Fill(
T* output_data,
T val,
HIP_LONG N) {
HIP_LONG id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = val;
id += blockDim.x;
}
}
}
// Fill: asynchronously sets output[0..count) to `value` on `stream` by
// launching the _Fill kernel; one thread handles GridDim::maxElementsPerThread
// elements, so the grid is ceil(count / (threads_per_block * elems_per_thread)).
// NOTE(review): count is narrowed from int64_t to HIP_LONG for the kernel —
// presumably callers guarantee it fits; confirm for very large tensors.
template <typename T>
void Fill(hipStream_t stream, T* output, T value, int64_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
HIP_LONG N = static_cast<HIP_LONG>(count);
_Fill<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream>>>(output, value, N);
}
// Lazily grown device-side buffer holding `count` copies of one fixed value.
// Reallocated (and refilled via Fill) only when a caller requests more
// elements than are cached; smaller requests reuse the existing buffer.
template <typename T>
class ConstantBufferImpl : public IConstantBuffer<T> {
public:
ConstantBufferImpl(T val) : buffer_(nullptr), count_(0), val_(val) {}
~ConstantBufferImpl() {
if (buffer_)
HIP_CALL_THROW(hipFree(buffer_));
}
// Returns a device pointer to at least `count` elements, each equal to val_.
virtual const T* GetBuffer(hipStream_t stream, size_t count) {
if (count > count_) {
// Grow: release the old allocation, make a larger one, refill it.
if (buffer_) {
HIP_CALL_THROW(hipFree(buffer_));
buffer_ = nullptr;
}
HIP_CALL_THROW(hipMalloc(&buffer_, count * sizeof(T)));
count_ = count;
Fill(stream, buffer_, val_, count);
}
return buffer_;
}
private:
T* buffer_;     // device allocation (nullptr until first GetBuffer)
size_t count_;  // element capacity of buffer_
T val_;         // constant value the buffer is filled with
};
// Factory: returns a constant buffer that fills with Consts<T>::One
// (e.g. a "ones" vector for GEMM-style reductions). Caller owns the result.
template <typename T>
std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() {
return std::make_unique<ConstantBufferImpl<T>>(Consts<T>::One);
}
template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>();
template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>();
template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>();
#define SPECIALIZED_FILL(T) \
template void Fill<T>(hipStream_t stream, T * output, T value, int64_t count);
SPECIALIZED_FILL(int8_t)
SPECIALIZED_FILL(int16_t)
SPECIALIZED_FILL(int32_t)
SPECIALIZED_FILL(int64_t)
SPECIALIZED_FILL(float)
SPECIALIZED_FILL(double)
SPECIALIZED_FILL(__half)
} // namespace rocm
} // namespace onnxruntime
|
06d0a579060f6cca592195b9a6b1c308be553877.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/param_pack/param_pack.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "megdnn/dtype.h"
#include "src/cuda/param_pack/param_pack.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace param_pack {
// Gather kernel: packs many source tensors into one flat destination buffer.
// For output slot `gid`, table_outer selects the source tensor and
// table_inner the offset inside it; an inner index of -1 marks padding,
// which is written as zero. One thread per packed element.
template <typename T>
__global__ void concat_kernel(const T** srcs, T* dst,
                              const int32_t* table_outer,
                              const int32_t* table_inner,
                              size_t total_size) {
    const size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= total_size)
        return;
    const int32_t src_id = table_outer[gid];
    const int32_t src_off = table_inner[gid];
    if (src_off == -1) {
        dst[gid] = 0;  // padding slot
    } else {
        dst[gid] = srcs[src_id][src_off];
    }
}
// Scatter kernel: unpacks the flat `src` buffer back into per-tensor outputs.
// table_outer selects the destination tensor, table_inner the offset inside
// it; an inner index of -1 marks padding and is simply skipped.
template <typename T>
__global__ void split_kernel(const T* src, T** dsts,
                             const int32_t* table_outer,
                             const int32_t* table_inner,
                             size_t total_size) {
    const size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= total_size)
        return;
    const int32_t dst_id = table_outer[gid];
    const int32_t dst_off = table_inner[gid];
    if (dst_off != -1) {
        dsts[dst_id][dst_off] = src[gid];
    }
}
// Host launcher for split_kernel: one thread per packed element, NR_THREADS
// per block; after_kernel_launch() surfaces any launch error.
template <typename T>
void split_proxy(const T* src, T** dsts, size_t total_size,
const int32_t* table_outer, const int32_t* table_inner,
hipStream_t stream) {
size_t NR_BLOCKS = DIVUP(total_size, NR_THREADS);
hipLaunchKernelGGL(( split_kernel), dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream,
src, dsts, table_outer, table_inner, total_size);
after_kernel_launch();
}
// Host launcher for concat_kernel: one thread per packed element, NR_THREADS
// per block; after_kernel_launch() surfaces any launch error.
template <typename T>
void concat_proxy(const T** srcs, T* dst, size_t total_size,
const int32_t* table_outer,
const int32_t* table_inner, hipStream_t stream) {
size_t NR_BLOCKS = DIVUP(total_size, NR_THREADS);
hipLaunchKernelGGL(( concat_kernel), dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream,
srcs, dst, table_outer, table_inner, total_size);
after_kernel_launch();
}
#define INST(T) \
template void concat_proxy<T>(const T**, T*, size_t, \
const int32_t*, const int32_t*, \
hipStream_t); \
template void split_proxy<T>(const T*, T**, size_t, \
const int32_t*, const int32_t*, \
hipStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace param_pack
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 06d0a579060f6cca592195b9a6b1c308be553877.cu | /**
* \file dnn/src/cuda/param_pack/param_pack.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "megdnn/dtype.h"
#include "src/cuda/param_pack/param_pack.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace param_pack {
// Gather kernel: packs many source tensors into one flat destination buffer.
// table_outer[addr] selects the source tensor, table_inner[addr] the offset
// inside it; an inner index of -1 marks padding and is written as zero.
// One thread per packed output element.
template <typename T>
__global__ void concat_kernel(const T** srcs, T* dst,
const int32_t* table_outer,
const int32_t* table_inner,
size_t total_size) {
size_t addr = threadIdx.x + blockIdx.x * blockDim.x;
if (addr < total_size) {
int32_t i = table_outer[addr];
int32_t idx = table_inner[addr];
if (idx != -1)
dst[addr] = srcs[i][idx];
else
dst[addr] = 0;
}
}
// Scatter kernel: unpacks the flat `src` buffer back into per-tensor outputs.
// table_outer[addr] selects the destination tensor, table_inner[addr] the
// offset inside it; an inner index of -1 marks padding and is skipped.
template <typename T>
__global__ void split_kernel(const T* src, T** dsts,
const int32_t* table_outer,
const int32_t* table_inner,
size_t total_size) {
size_t addr = threadIdx.x + blockIdx.x * blockDim.x;
if (addr < total_size) {
int32_t i = table_outer[addr];
int32_t idx = table_inner[addr];
if (idx != -1) {
dsts[i][idx] = src[addr];
}
}
}
// Host launcher for split_kernel: one thread per packed element, NR_THREADS
// per block; after_kernel_launch() surfaces any launch error.
template <typename T>
void split_proxy(const T* src, T** dsts, size_t total_size,
const int32_t* table_outer, const int32_t* table_inner,
cudaStream_t stream) {
size_t NR_BLOCKS = DIVUP(total_size, NR_THREADS);
split_kernel<<<NR_BLOCKS, NR_THREADS, 0, stream>>>(
src, dsts, table_outer, table_inner, total_size);
after_kernel_launch();
}
// Host launcher for concat_kernel: one thread per packed element, NR_THREADS
// per block; after_kernel_launch() surfaces any launch error.
template <typename T>
void concat_proxy(const T** srcs, T* dst, size_t total_size,
const int32_t* table_outer,
const int32_t* table_inner, cudaStream_t stream) {
size_t NR_BLOCKS = DIVUP(total_size, NR_THREADS);
concat_kernel<<<NR_BLOCKS, NR_THREADS, 0, stream>>>(
srcs, dst, table_outer, table_inner, total_size);
after_kernel_launch();
}
#define INST(T) \
template void concat_proxy<T>(const T**, T*, size_t, \
const int32_t*, const int32_t*, \
cudaStream_t); \
template void split_proxy<T>(const T*, T**, size_t, \
const int32_t*, const int32_t*, \
cudaStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace param_pack
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
be989311de0614c31b294b0cc8e2f952753704cf.hip | // !!! This is a file automatically generated by hipify!!!
//export PATH=/usr/local/cuda-9.1/bin:$PATH
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <rocblas.h>
#include <rocblas.h>
#include <cblas.h>
#include <hip/hip_complex.h>
void resuse(char *str);
/*
 * timeDiff: elapsed seconds between two clock_gettime() samples.
 * t1 is the earlier timestamp, t2 the later one; returns (t2 - t1) in
 * seconds as a double.
 * Bug fix: the nanosecond field of t1 was previously SUBTRACTED from its
 * seconds (T1 = sec - nsec/1e9), overstating every measured interval by up
 * to ~2 seconds; both operands must be converted as sec + nsec/1e9.
 */
double timeDiff( struct timespec *t1, struct timespec *t2)
{
double T1, T2;
T2 = (double)t2->tv_sec + (double)t2->tv_nsec / 1.0e9;
T1 = (double)t1->tv_sec + (double)t1->tv_nsec / 1.0e9;
return(T2 - T1);
}
int main(int argc, char *argv[])
{
int dim = atoi(argv[1]);
printf("%d\n", dim);
int i,j;
int status;
hipDoubleComplex *psa, *psb, *psc, *psc_GPU;
hipDoubleComplex *sap, *sbp, *scp;
hipDoubleComplex *pda, *pdb, *pdc;
float deltaT = 0.0;
struct timespec t1;
struct timespec t2;
int ptime();
pda = NULL;
pdb = NULL;
pdc = NULL;
// psa = (hipDoubleComplex *) malloc(dim * dim * sizeof(*psa) );
// psb = (hipDoubleComplex *) malloc(dim * dim * sizeof(*psb) );
// psc = (hipDoubleComplex *) malloc(dim * dim * sizeof(*psc) );
hipHostMalloc((void**)&psa, dim * dim * sizeof(*psa));
hipHostMalloc((void**)&psb, dim * dim * sizeof(*psb));
hipHostMalloc((void**)&psc, dim * dim * sizeof(*psc));
//psc_GPU = (double *) malloc(dim * dim * sizeof(*psc) );
hipHostMalloc((void**) &psc_GPU, dim * dim * sizeof(*psc), hipHostMallocMapped);
size_t mem_tot_0 = 0;
size_t mem_free_0 = 0;
hipMemGetInfo (&mem_free_0, & mem_tot_0);
printf("DEVICE MEMORY TOTAL: %zu MB\n", mem_tot_0/1000000);
printf("DEVICE AVAILABLE: %zu MB\n", mem_free_0/1000000);
clock_gettime(CLOCK_MONOTONIC, &t1);
sap = psa;
sbp = psb;
scp = psc;
for (i = 0; i < dim; i++)
for (j = 0; j < dim; j++) {
sap[(dim*i) + j].x = ((dim*i) + j);
sbp[(dim*i) + j].x = ((dim*i) + j);
}
// clock_gettime(CLOCK_MONOTONIC, &t1);
// /* Performs operation using blas */
// cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, dim, dim, dim, alpha, psa, dim, psb, dim, beta, psc, dim);
// clock_gettime(CLOCK_MONOTONIC, &t2);
// deltaT = timeDiff(&t1, &t2);
// printf(" *** Elapsed Time [BLAS] = %6.4f secs *** \n", deltaT);
/* Initialize CUDA */
status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Re-initialize the matrices */
clock_gettime(CLOCK_MONOTONIC, &t1);
sap = psa;
sbp = psb;
scp = psc_GPU;
for (i = 0; i < dim; i++) {
for (j = 0; j < dim; j++) {
sap[(dim*i) + j].x = (double)rand() / (double)RAND_MAX;
sbp[(dim*i) + j].x = (double)rand() / (double)RAND_MAX;
}
}
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);;
fflush(stdout);
/* Allocate device memory for the matrices */
status = hipblasAlloc(dim*dim, sizeof(*pda), (void**) &pda);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device memory allocation error (A)\n");
return EXIT_FAILURE;
}
status = hipHostMalloc((void**) &pdb, (dim * dim * sizeof(*psb)), hipHostMallocDefault);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device memory allocation error (B)\n");
return EXIT_FAILURE;
}
status = hipHostMalloc((void**) &pdc, dim * dim * sizeof(*psc), hipHostMallocMapped);
if (status != hipSuccess)
printf("Error allocating pinned host memory\n");
/* Initialize the device matrices with the host matrices */
status = hipblasSetVector(dim*dim, sizeof(*psa), psa, 1, pda, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
hipHostGetDevicePointer(&pdc, psc_GPU, 0);
hipblasHandle_t handle;
hipblasCreate(&handle);
const hipDoubleComplex alf = make_cuDoubleComplex(1, 0);
const hipDoubleComplex bet = make_cuDoubleComplex(0, 0);
const hipDoubleComplex *alpha = &alf;
const hipDoubleComplex *beta = &bet;
/* Clear last error */
hipblasGetError();
clock_gettime(CLOCK_MONOTONIC, &t1);
status = hipMemcpy(pdb, psb, dim * dim * sizeof(*psb), hipMemcpyHostToDevice);
status = hipMemcpy(pda, psa, dim * dim * sizeof(*psa), hipMemcpyHostToDevice);
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);
printf(" *** Elapsed Time [CUBLAS COPY TO DEVICE] = %6.4f secs *** \n", deltaT);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device access error (write B)\n");
printf("%d\n",status );
return EXIT_FAILURE;
}
/* Performs operation using cublas */
clock_gettime(CLOCK_MONOTONIC, &t1);
//hipblasDgemm('n', 'n', dim, dim, dim, alpha, pda, dim, pdb, dim, beta, pdc, dim);
// Attempt to use optimised complex matrix multiplication:
hipblasZgemm3m(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, dim, dim, alpha, pdb, dim, pda, dim, beta, pdc, dim);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);
printf(" *** Elapsed Time [CUBLAS MULTIPLY] = %6.4f secs *** \n", deltaT);
clock_gettime(CLOCK_MONOTONIC, &t1);
status = hipMemcpy(psc_GPU, pdc, dim * dim * sizeof(*pdc), hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);
printf(" *** Elapsed Time [CUBLAS COPY TO HOST] = %6.4f secs *** \n", deltaT);
status = hipblasGetError();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
// for (int i = 0; i < dim; ++i)
// {
// for (int j = 0; j < dim; j++)
// {
// //printf("%lf",psc[(dim*i) + j] );
// //printf("---- %lf\n",psc_GPU[(dim*i) + j] );
// if (psc[(dim*i) + j] != psc_GPU[(dim*i) + j])
// {
// printf("ERROR!!!\n");
// exit(1);
// }
// }
// }
// printf("\nOUTPUT IS THE SAME\n");
}
| be989311de0614c31b294b0cc8e2f952753704cf.cu | //export PATH=/usr/local/cuda-9.1/bin:$PATH
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cublas.h>
#include <cublas_v2.h>
#include <cblas.h>
#include <cuComplex.h>
void resuse(char *str);
/*
 * timeDiff: elapsed seconds between two clock_gettime() samples.
 * t1 is the earlier timestamp, t2 the later one; returns (t2 - t1) in
 * seconds as a double.
 * Bug fix: the nanosecond field of t1 was previously SUBTRACTED from its
 * seconds (T1 = sec - nsec/1e9), overstating every measured interval by up
 * to ~2 seconds; both operands must be converted as sec + nsec/1e9.
 */
double timeDiff( struct timespec *t1, struct timespec *t2)
{
double T1, T2;
T2 = (double)t2->tv_sec + (double)t2->tv_nsec / 1.0e9;
T1 = (double)t1->tv_sec + (double)t1->tv_nsec / 1.0e9;
return(T2 - T1);
}
int main(int argc, char *argv[])
{
int dim = atoi(argv[1]);
printf("%d\n", dim);
int i,j;
int status;
cuDoubleComplex *psa, *psb, *psc, *psc_GPU;
cuDoubleComplex *sap, *sbp, *scp;
cuDoubleComplex *pda, *pdb, *pdc;
float deltaT = 0.0;
struct timespec t1;
struct timespec t2;
int ptime();
pda = NULL;
pdb = NULL;
pdc = NULL;
// psa = (cuDoubleComplex *) malloc(dim * dim * sizeof(*psa) );
// psb = (cuDoubleComplex *) malloc(dim * dim * sizeof(*psb) );
// psc = (cuDoubleComplex *) malloc(dim * dim * sizeof(*psc) );
cudaMallocHost((void**)&psa, dim * dim * sizeof(*psa));
cudaMallocHost((void**)&psb, dim * dim * sizeof(*psb));
cudaMallocHost((void**)&psc, dim * dim * sizeof(*psc));
//psc_GPU = (double *) malloc(dim * dim * sizeof(*psc) );
cudaHostAlloc((void**) &psc_GPU, dim * dim * sizeof(*psc), cudaHostAllocMapped);
size_t mem_tot_0 = 0;
size_t mem_free_0 = 0;
cudaMemGetInfo (&mem_free_0, & mem_tot_0);
printf("DEVICE MEMORY TOTAL: %zu MB\n", mem_tot_0/1000000);
printf("DEVICE AVAILABLE: %zu MB\n", mem_free_0/1000000);
clock_gettime(CLOCK_MONOTONIC, &t1);
sap = psa;
sbp = psb;
scp = psc;
for (i = 0; i < dim; i++)
for (j = 0; j < dim; j++) {
sap[(dim*i) + j].x = ((dim*i) + j);
sbp[(dim*i) + j].x = ((dim*i) + j);
}
// clock_gettime(CLOCK_MONOTONIC, &t1);
// /* Performs operation using blas */
// cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, dim, dim, dim, alpha, psa, dim, psb, dim, beta, psc, dim);
// clock_gettime(CLOCK_MONOTONIC, &t2);
// deltaT = timeDiff(&t1, &t2);
// printf(" *** Elapsed Time [BLAS] = %6.4f secs *** \n", deltaT);
/* Initialize CUDA */
status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Re-initialize the matrices */
clock_gettime(CLOCK_MONOTONIC, &t1);
sap = psa;
sbp = psb;
scp = psc_GPU;
for (i = 0; i < dim; i++) {
for (j = 0; j < dim; j++) {
sap[(dim*i) + j].x = (double)rand() / (double)RAND_MAX;
sbp[(dim*i) + j].x = (double)rand() / (double)RAND_MAX;
}
}
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);;
fflush(stdout);
/* Allocate device memory for the matrices */
status = cublasAlloc(dim*dim, sizeof(*pda), (void**) &pda);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device memory allocation error (A)\n");
return EXIT_FAILURE;
}
status = cudaHostAlloc((void**) &pdb, (dim * dim * sizeof(*psb)), cudaHostAllocDefault);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device memory allocation error (B)\n");
return EXIT_FAILURE;
}
status = cudaHostAlloc((void**) &pdc, dim * dim * sizeof(*psc), cudaHostAllocMapped);
if (status != cudaSuccess)
printf("Error allocating pinned host memory\n");
/* Initialize the device matrices with the host matrices */
status = cublasSetVector(dim*dim, sizeof(*psa), psa, 1, pda, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
cudaHostGetDevicePointer(&pdc, psc_GPU, 0);
cublasHandle_t handle;
cublasCreate(&handle);
const cuDoubleComplex alf = make_cuDoubleComplex(1, 0);
const cuDoubleComplex bet = make_cuDoubleComplex(0, 0);
const cuDoubleComplex *alpha = &alf;
const cuDoubleComplex *beta = &bet;
/* Clear last error */
cublasGetError();
clock_gettime(CLOCK_MONOTONIC, &t1);
status = cudaMemcpy(pdb, psb, dim * dim * sizeof(*psb), cudaMemcpyHostToDevice);
status = cudaMemcpy(pda, psa, dim * dim * sizeof(*psa), cudaMemcpyHostToDevice);
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);
printf(" *** Elapsed Time [CUBLAS COPY TO DEVICE] = %6.4f secs *** \n", deltaT);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! device access error (write B)\n");
printf("%d\n",status );
return EXIT_FAILURE;
}
/* Performs operation using cublas */
clock_gettime(CLOCK_MONOTONIC, &t1);
//cublasDgemm('n', 'n', dim, dim, dim, alpha, pda, dim, pdb, dim, beta, pdc, dim);
// Attempt to use optimised complex matrix multiplication:
cublasZgemm3m(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, dim, dim, alpha, pdb, dim, pda, dim, beta, pdc, dim);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);
printf(" *** Elapsed Time [CUBLAS MULTIPLY] = %6.4f secs *** \n", deltaT);
clock_gettime(CLOCK_MONOTONIC, &t1);
status = cudaMemcpy(psc_GPU, pdc, dim * dim * sizeof(*pdc), cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &t2);
deltaT = timeDiff(&t1, &t2);
printf(" *** Elapsed Time [CUBLAS COPY TO HOST] = %6.4f secs *** \n", deltaT);
status = cublasGetError();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
// for (int i = 0; i < dim; ++i)
// {
// for (int j = 0; j < dim; j++)
// {
// //printf("%lf",psc[(dim*i) + j] );
// //printf("---- %lf\n",psc_GPU[(dim*i) + j] );
// if (psc[(dim*i) + j] != psc_GPU[(dim*i) + j])
// {
// printf("ERROR!!!\n");
// exit(1);
// }
// }
// }
// printf("\nOUTPUT IS THE SAME\n");
}
|
4d00cb15408b3630d0725aa9042aa17f18fcc97b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Element-wise power over a 2-D matrix slice: c = pow(a, b), computed in
// REAL precision via CAST(pow) (powf when REAL is float). gid_0 indexes the
// leading dimension (sd valid entries), gid_1 the second dimension (fd
// entries); each operand has its own offset and leading dimension, addressed
// as offset + gid_0 + gid_1 * ld. Out-of-range threads do nothing.
__global__ void ge_pow (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(pow)(a[offset_a + gid_0 + gid_1 * ld_a], b[offset_b + gid_0 + gid_1 * ld_b]);
}
} | 4d00cb15408b3630d0725aa9042aa17f18fcc97b.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Element-wise power over a 2-D matrix slice: c = pow(a, b), computed in
// REAL precision via CAST(pow) (powf when REAL is float). gid_0 indexes the
// leading dimension (sd valid entries), gid_1 the second dimension (fd
// entries); each operand has its own offset and leading dimension, addressed
// as offset + gid_0 + gid_1 * ld. Out-of-range threads do nothing.
__global__ void ge_pow (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(pow)(a[offset_a + gid_0 + gid_1 * ld_a], b[offset_b + gid_0 + gid_1 * ld_b]);
}
} |
ff72ed1b83300b9939cb7569b284ac0cc7d1111a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* The MIT License
Copyright (c) 2015, by Ruhua Jiang <ruhua.jiang@yahoo.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <zlib.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <inttypes.h>
#include <thrust/version.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "kseq.h"
#include "bwt.h"
// STEP 1: declare the type of file handler and the read() function
KSEQ_INIT(gzFile, gzread)
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
unsigned char nst_nt4_table[256] = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 5 /*'-'*/, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4 };
// Reads a FASTA file (optionally gzipped) with kseq and converts the bases
// of the record into 2-bit codes via nst_nt4_table (A=0,C=1,G=2,T=3,N=4),
// storing the result in `sequence`. Always returns 0.
// NOTE(review): each loop iteration overwrites sequence.seq without freeing
// the previous allocation, so multi-record inputs leak memory and only the
// LAST record is kept — confirm whether single-record input is assumed.
int fasta_parser(char *indexFile, Sequence &sequence) {
gzFile fp;
kseq_t *seq;
int l;
fp = gzopen(indexFile, "r"); // STEP 2: open the file handler
seq = kseq_init(fp); // STEP 3: initialize seq
while ((l = kseq_read(seq)) >= 0) { // STEP 4: read sequence
sequence.length = seq->seq.l;
sequence.seq = (uint8_t*) calloc(sequence.length, 1);
for (int i = 0; i != seq->seq.l; i++)
sequence.seq[i] = nst_nt4_table[(int) seq->seq.s[i]];
}
printf("return value: %d\n", l);
kseq_destroy(seq); // STEP 5: destroy seq
gzclose(fp); // STEP 6: close the file handler
return 0;
}
__global__ void count_suffixs(uint32_t *d_A, const uint8_t *d_sequence,
uint64_t l, int prefix_len, uint64_t seq_len, uint32_t prefix) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
uint64_t start_pos = idx * l, end_pos = (idx + 1) * l; //end_pos not included
uint32_t count = 0;
uint32_t value = 0;
uint32_t debug_count = 0;
if (idx == BLOCKS_NUMBER * THREADS_NUMBER - 1) { // last thread
end_pos = seq_len;
}
//Each thread scans through start_pos to end_pos
if (idx != BLOCKS_NUMBER * THREADS_NUMBER - 1) {
for (uint64_t i = 0; i < l; i++) {
value = 0;
for (int k = 0; k < prefix_len; k++) {
value <<= 2;
value += (uint32_t) d_sequence[start_pos + k + i];
}
debug_count++;
if (value == prefix)
count++;
}
} else { //last thread
for (uint64_t i = 0; i < l; i++) {
if (start_pos + i <= end_pos - prefix_len) //all threads except for last part of last thread must satisfy
{
value = 0;
for (int k = 0; k < prefix_len; k++) {
value <<= 2;
value += (uint32_t) d_sequence[start_pos + k + i];
}
debug_count++;
if (value == prefix)
count++;
} else if ((start_pos + i > end_pos - prefix_len)
&& (start_pos + i < end_pos)) { //The last part of last thread is reading end of the text
int left = seq_len - (start_pos + i);
value = 0;
for (int k = 0; k < left; k++) {
value <<= 2;
value += (uint32_t) d_sequence[start_pos + i + k];
}
for (int k = 0; k < prefix_len - left; k++) {
value <<= 2;
value += (uint32_t) d_sequence[k];
}
debug_count++;
if (value == prefix)
count++;
} else
break;
}
}
d_A[idx] = count;
}
__global__ void get_suffix_block(const uint32_t *d_A, const uint8_t *d_sequence,
uint64_t l, int prefix_len, uint64_t seq_len, uint32_t prefix,
uint32_t *d_B) {
uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
uint64_t start_pos = idx * l, end_pos = (idx + 1) * l; //end_pos not included
uint32_t b = 0;
uint32_t value = 0;
uint8_t tail_seq[10];
uint32_t debug_count = 0;
if (idx == BLOCKS_NUMBER * THREADS_NUMBER - 1) { // last thread
end_pos = seq_len;
}
//Each thread scans through start_pos to end_pos
if (idx != BLOCKS_NUMBER * THREADS_NUMBER - 1) {
for (uint64_t i = 0; i < l; i++) {
value = 0;
for (int k = 0; k < prefix_len; k++) {
value <<= 2;
value += (uint32_t) d_sequence[start_pos + k + i];
}
debug_count++;
if (value == prefix) {
d_B[d_A[idx] + b] = start_pos + i;
b++;
}
}
} else { //last thread
for (uint64_t i = 0; i < l; i++) {
if (start_pos + i <= end_pos - prefix_len) //all threads except for last part of last thread must satisfy
{
value = 0;
for (int k = 0; k < prefix_len; k++) {
value <<= 2;
value += (uint32_t) d_sequence[start_pos + k + i];
}
debug_count++;
if (value == prefix) {
d_B[d_A[idx] + b] = start_pos + i;
b++;
}
} else if ((start_pos + i > end_pos - prefix_len)
&& (start_pos + i < end_pos)) { //The last part of last thread is reading end of the text
int left = seq_len - (start_pos + i);
value = 0;
for (int k = 0; k < left; k++) {
value <<= 2;
value += (uint32_t) d_sequence[start_pos + i + k];
}
for (int k = 0; k < prefix_len - left; k++) {
value <<= 2;
value += (uint32_t) d_sequence[k];
}
debug_count++;
if (value == prefix) {
d_B[d_A[idx] + b] = start_pos + i;
b++;
}
} else
break;
}
}
}
// For each suffix start position in d_B, packs characters of the text into a
// 32-bit radix-sort key (2 bits per character). One thread per entry of d_B.
// Normal case: packs characters prefix_len..v_len-1 after the position.
// Wrap case (suffix too close to the end): packs the remaining tail and then
// wraps around to the start of the text.
// NOTE(review): the wrap branch loops with `prefix_len` and starts at
// position+0, while the main branch packs positions prefix_len..v_len-1 —
// this looks inconsistent (v_len expected?); confirm against the refinement
// step mentioned in suffix_blocking.
__global__ void map_pos2key(const uint8_t *d_sequence, uint64_t seq_length,
int prefix_len, int v_len, const uint32_t *d_B, uint32_t B_len,
uint32_t *d_B_key) {
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < B_len) {
uint64_t position = d_B[idx];
uint32_t value = 0;
if (position <= (uint64_t) (seq_length - (v_len + 1))) {
// Normal case: the whole key window lies inside the text.
for (int i = prefix_len; i < v_len; i++) {
value <<= 2;
value += (uint32_t) d_sequence[position + i];
}
} else {
// Wrap case: read the tail, then continue from the beginning.
int left = seq_length - position;
value = 0;
for (int k = 0; k < left; k++) {
value <<= 2;
value += (uint32_t) d_sequence[position + k];
}
for (int k = 0; k < prefix_len - left; k++) {
value <<= 2;
value += (uint32_t) d_sequence[k];
}
}
d_B_key[idx] = value;
}
}
/*
__global__ void checking(uint32_t *d_B_key, uint32_t B_len)
{
}
*/
void suffix_blocking(uint32_t prefix, const uint8_t *d_sequence,
uint8_t *h_sequence, uint64_t seq_length, int prefix_len,
FILE *bwt_file) {
uint32_t *d_A = NULL; //counting array
uint32_t *h_A = NULL;
uint32_t *h_B = NULL;
uint32_t *d_B = NULL;
uint32_t *h_B_key = NULL;
uint32_t *d_B_key = NULL;
fprintf(stderr, "block %llu processing\n", prefix);
uint64_t l = seq_length / (THREADS_NUMBER * BLOCKS_NUMBER) + 1;
h_A = (uint32_t*) malloc(
sizeof(uint32_t) * (THREADS_NUMBER * BLOCKS_NUMBER));
if (h_A == NULL) {
fprintf(stderr, "Out of host memory!\n");
}
CUDA_CHECK_RETURN(
hipMalloc((void**) &d_A, sizeof(uint32_t) * (THREADS_NUMBER*BLOCKS_NUMBER)));hipLaunchKernelGGL((
count_suffixs), dim3(BLOCKS_NUMBER), dim3(THREADS_NUMBER), 0, 0, d_A, d_sequence,l,prefix_len,seq_length, prefix);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
hipMemcpy(h_A, d_A, sizeof(uint32_t) * (THREADS_NUMBER * BLOCKS_NUMBER),
hipMemcpyDeviceToHost);
uint32_t last = h_A[THREADS_NUMBER * BLOCKS_NUMBER - 1];
//exclusive prefix sum
thrust::exclusive_scan(h_A, h_A + THREADS_NUMBER * BLOCKS_NUMBER, h_A);
//calculate number of elments in B
uint32_t B_len = h_A[THREADS_NUMBER * BLOCKS_NUMBER - 1] + last;
h_B = (uint32_t*) malloc(sizeof(uint32_t) * B_len);
if (h_B == NULL) {
fprintf(stderr, "Out of host memory!\n");
exit(1);
}
CUDA_CHECK_RETURN(hipMalloc((void** ) &d_B, sizeof(uint32_t) * B_len));
hipMemcpy(d_A, h_A, sizeof(uint32_t) * (THREADS_NUMBER * BLOCKS_NUMBER),
hipMemcpyHostToDevice);hipLaunchKernelGGL((
get_suffix_block), dim3(BLOCKS_NUMBER), dim3(THREADS_NUMBER), 0, 0, d_A, d_sequence,l, prefix_len, seq_length,prefix,d_B);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
hipMemcpy(h_B, d_B, sizeof(uint32_t) * B_len, hipMemcpyDeviceToHost);
CUDA_CHECK_RETURN(hipFree(d_A));
//Map position to a key
int thread_num = 1024;
int block_num = B_len / thread_num + 1;
h_B_key = (uint32_t*) malloc(sizeof(uint32_t) * B_len);
if (h_B_key == NULL) {
fprintf(stderr, "Out of host memory!\n");
exit(1);
}
CUDA_CHECK_RETURN(hipMalloc((void** ) &d_B_key, sizeof(uint32_t) * B_len));
int v_len = 16 + prefix_len; //assume we can use first v_len characters to distinguish suffixes
hipLaunchKernelGGL(( map_pos2key), dim3(block_num), dim3(thread_num), 0, 0, d_sequence,seq_length,prefix_len, v_len,d_B, B_len,d_B_key);
hipMemcpy(h_B_key, d_B_key, sizeof(uint32_t) * B_len,
hipMemcpyDeviceToHost);
//Radix sort
thrust::sort_by_key(h_B_key, h_B_key + B_len, h_B);
//Construct the BWT
//FIXME Now h_B is almost the suffix array, possibly with some minor errors, since we need a refinement
uint8_t bwt_value;
for (uint32_t i = 0; i < B_len; i++) {
if (h_B[i] == 0)
bwt_value = h_sequence[seq_length - 1];
else
bwt_value = h_sequence[h_B[i] - 1];
fseek(bwt_file, 0, SEEK_END);
fprintf(bwt_file, "%d", bwt_value);
}
//Parallel checking algorithm
}
// Entry point: builds the BWT of the FASTA sequence in `indexFile` and
// appends it to "<indexFile>.bwt". Suffixes are processed in 4^prefix_len
// blocks, one suffix_blocking() call per 2-bit prefix value.
// NOTE(review): forward_only and buf_length are unused; bwt_file is never
// fclose'd, d_sequence never hipFree'd, bwt_fn never free'd, and fopen's
// return value is unchecked — acceptable only because the process exits soon
// after; confirm before reuse as a library routine.
int bwt(char *indexFile, uint32_t prefix_len) {
uint8_t *d_sequence = NULL;
int forward_only = 1;
uint64_t buf_length;
Sequence sequence;
fasta_parser(indexFile, sequence);
FILE *bwt_file;
char *bwt_fn;
bwt_fn = (char*) calloc(strlen(indexFile) + 10, 1);
strcpy(bwt_fn, indexFile);
strcat(bwt_fn, ".bwt");
bwt_file = fopen(bwt_fn, "wb");
CUDA_CHECK_RETURN(
hipMalloc((void** ) &d_sequence,
sizeof(uint8_t) * sequence.length));
CUDA_CHECK_RETURN(
hipMemcpy(d_sequence, sequence.seq,
sizeof(uint8_t) * sequence.length, hipMemcpyHostToDevice)); //copy text to global memory of GPU
//There will be 4^prefix_len number of blocks
uint32_t number_blocks = pow(4.0, (int) prefix_len);
for (uint32_t p = 0; p < number_blocks; p++) {
suffix_blocking(p, d_sequence, sequence.seq, sequence.length,
prefix_len, bwt_file);
}
return 0;
}
| ff72ed1b83300b9939cb7569b284ac0cc7d1111a.cu | /* The MIT License
Copyright (c) 2015, by Ruhua Jiang <ruhua.jiang@yahoo.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <zlib.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <inttypes.h>
#include <thrust/version.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "kseq.h"
#include "bwt.h"
// STEP 1: declare the type of file handler and the read() function
KSEQ_INIT(gzFile, gzread)
/* Evaluates a CUDA runtime call and aborts the process, printing the
   error string plus the failing file/line, if it did not return cudaSuccess. */
#define CUDA_CHECK_RETURN(value) { \
	cudaError_t _m_cudaStat = value; \
	if (_m_cudaStat != cudaSuccess) { \
		fprintf(stderr, "Error %s at line %d in file %s\n", \
				cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
		exit(1); \
	} }
unsigned char nst_nt4_table[256] = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 5 /*'-'*/, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4 };
/*
 * Reads the (optionally gzipped) FASTA file indexFile and stores the last
 * record's sequence in `sequence`, each base recoded through nst_nt4_table
 * (A/C/G/T -> 0..3, '-' -> 5, anything else 4).
 * Returns 0 on success, -1 if the file cannot be opened.
 */
int fasta_parser(char *indexFile, Sequence &sequence) {
	gzFile fp;
	kseq_t *seq;
	int l;
	fp = gzopen(indexFile, "r"); // STEP 2: open the file handler
	if (fp == NULL) { // the original passed a NULL handle straight to kseq_init
		fprintf(stderr, "Cannot open FASTA file %s\n", indexFile);
		return -1;
	}
	seq = kseq_init(fp); // STEP 3: initialize seq
	uint8_t *prev = NULL; // buffer of the previously read record, if any
	while ((l = kseq_read(seq)) >= 0) { // STEP 4: read sequence
		free(prev); // release the previous record's buffer (was leaked before)
		sequence.length = seq->seq.l;
		sequence.seq = (uint8_t*) calloc(sequence.length, 1);
		if (sequence.seq == NULL) {
			fprintf(stderr, "Out of host memory!\n");
			exit(1);
		}
		prev = sequence.seq;
		/* size_t index: seq->seq.l is size_t, an int counter could overflow */
		for (size_t i = 0; i != seq->seq.l; i++)
			sequence.seq[i] = nst_nt4_table[(int) seq->seq.s[i]];
	}
	printf("return value: %d\n", l);
	kseq_destroy(seq); // STEP 5: destroy seq
	gzclose(fp); // STEP 6: close the file handler
	return 0;
}
/*
 * Counts, per thread, how many suffixes of the (circular) text begin with the
 * 2-bit-packed prefix `prefix`.  The text of length seq_len is split into
 * BLOCKS_NUMBER*THREADS_NUMBER chunks of l positions each; thread idx packs
 * the prefix_len symbols at every position of its chunk into a uint32_t
 * (2 bits per symbol) and compares against `prefix`.  Only the last thread
 * handles positions whose prefix wraps around the end of the text.
 * The per-thread count is written to d_A[idx].
 */
__global__ void count_suffixs(uint32_t *d_A, const uint8_t *d_sequence,
		uint64_t l, int prefix_len, uint64_t seq_len, uint32_t prefix) {
	uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
	uint64_t start_pos = idx * l, end_pos = (idx + 1) * l; //end_pos not included
	uint32_t count = 0;
	uint32_t value = 0;
	uint32_t debug_count = 0; /* NOTE(review): incremented but never read */
	if (idx == BLOCKS_NUMBER * THREADS_NUMBER - 1) { // last thread
		end_pos = seq_len;
	}
	//Each thread scans through start_pos to end_pos
	if (idx != BLOCKS_NUMBER * THREADS_NUMBER - 1) {
		for (uint64_t i = 0; i < l; i++) {
			/* pack the prefix_len symbols starting at start_pos+i, 2 bits each */
			value = 0;
			for (int k = 0; k < prefix_len; k++) {
				value <<= 2;
				value += (uint32_t) d_sequence[start_pos + k + i];
			}
			debug_count++;
			if (value == prefix)
				count++;
		}
	} else { //last thread
		for (uint64_t i = 0; i < l; i++) {
			if (start_pos + i <= end_pos - prefix_len) //all threads except for last part of last thread must satisfy
			{
				value = 0;
				for (int k = 0; k < prefix_len; k++) {
					value <<= 2;
					value += (uint32_t) d_sequence[start_pos + k + i];
				}
				debug_count++;
				if (value == prefix)
					count++;
			} else if ((start_pos + i > end_pos - prefix_len)
					&& (start_pos + i < end_pos)) { //The last part of last thread is reading end of the text
				/* suffix wraps: read the tail of the text, then its head */
				int left = seq_len - (start_pos + i);
				value = 0;
				for (int k = 0; k < left; k++) {
					value <<= 2;
					value += (uint32_t) d_sequence[start_pos + i + k];
				}
				for (int k = 0; k < prefix_len - left; k++) {
					value <<= 2;
					value += (uint32_t) d_sequence[k];
				}
				debug_count++;
				if (value == prefix)
					count++;
			} else
				break;
		}
	}
	d_A[idx] = count;
}
/*
 * Companion to count_suffixs: re-scans the same chunks and writes the text
 * position of every suffix starting with `prefix` into d_B.  d_A must hold
 * the exclusive prefix sums of the per-thread counts, so thread idx writes
 * its b-th match to d_B[d_A[idx] + b] without collisions.  The last thread
 * again handles suffixes that wrap around the end of the text.
 */
__global__ void get_suffix_block(const uint32_t *d_A, const uint8_t *d_sequence,
		uint64_t l, int prefix_len, uint64_t seq_len, uint32_t prefix,
		uint32_t *d_B) {
	uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
	uint64_t start_pos = idx * l, end_pos = (idx + 1) * l; //end_pos not included
	uint32_t b = 0; //matches written so far by this thread
	uint32_t value = 0;
	uint8_t tail_seq[10]; /* NOTE(review): unused */
	uint32_t debug_count = 0; /* NOTE(review): incremented but never read */
	if (idx == BLOCKS_NUMBER * THREADS_NUMBER - 1) { // last thread
		end_pos = seq_len;
	}
	//Each thread scans through start_pos to end_pos
	if (idx != BLOCKS_NUMBER * THREADS_NUMBER - 1) {
		for (uint64_t i = 0; i < l; i++) {
			value = 0;
			for (int k = 0; k < prefix_len; k++) {
				value <<= 2;
				value += (uint32_t) d_sequence[start_pos + k + i];
			}
			debug_count++;
			if (value == prefix) {
				d_B[d_A[idx] + b] = start_pos + i;
				b++;
			}
		}
	} else { //last thread
		for (uint64_t i = 0; i < l; i++) {
			if (start_pos + i <= end_pos - prefix_len) //all threads except for last part of last thread must satisfy
			{
				value = 0;
				for (int k = 0; k < prefix_len; k++) {
					value <<= 2;
					value += (uint32_t) d_sequence[start_pos + k + i];
				}
				debug_count++;
				if (value == prefix) {
					d_B[d_A[idx] + b] = start_pos + i;
					b++;
				}
			} else if ((start_pos + i > end_pos - prefix_len)
					&& (start_pos + i < end_pos)) { //The last part of last thread is reading end of the text
				/* suffix wraps: read the tail of the text, then its head */
				int left = seq_len - (start_pos + i);
				value = 0;
				for (int k = 0; k < left; k++) {
					value <<= 2;
					value += (uint32_t) d_sequence[start_pos + i + k];
				}
				for (int k = 0; k < prefix_len - left; k++) {
					value <<= 2;
					value += (uint32_t) d_sequence[k];
				}
				debug_count++;
				if (value == prefix) {
					d_B[d_A[idx] + b] = start_pos + i;
					b++;
				}
			} else
				break;
		}
	}
}
/*
 * Maps each suffix position in d_B to a uint32_t sort key: the symbols at
 * offsets [prefix_len, v_len) of the suffix are packed 2 bits each (the
 * first prefix_len symbols are identical within a block and thus skipped).
 * One thread per suffix; threads with idx >= B_len do nothing.
 */
__global__ void map_pos2key(const uint8_t *d_sequence, uint64_t seq_length,
		int prefix_len, int v_len, const uint32_t *d_B, uint32_t B_len,
		uint32_t *d_B_key) {
	uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < B_len) {
		uint64_t position = d_B[idx];
		uint32_t value = 0;
		if (position <= (uint64_t) (seq_length - (v_len + 1))) {
			for (int i = prefix_len; i < v_len; i++) {
				value <<= 2;
				value += (uint32_t) d_sequence[position + i];
			}
		} else {
			/* suffix runs off the end of the text: wrap to the beginning.
			   NOTE(review): this branch packs prefix_len symbols in total
			   starting at `position`, not the [prefix_len, v_len) window
			   used above — looks inconsistent; confirm intended key layout. */
			int left = seq_length - position;
			value = 0;
			for (int k = 0; k < left; k++) {
				value <<= 2;
				value += (uint32_t) d_sequence[position + k];
			}
			for (int k = 0; k < prefix_len - left; k++) {
				value <<= 2;
				value += (uint32_t) d_sequence[k];
			}
		}
		d_B_key[idx] = value;
	}
}
/*
__global__ void checking(uint32_t *d_B_key, uint32_t B_len)
{
}
*/
/*
 * Processes one prefix block of the BWT construction:
 *   1. count_suffixs counts, per GPU thread, the suffixes starting with `prefix`;
 *   2. an exclusive prefix sum turns the counts into write offsets;
 *   3. get_suffix_block gathers the matching suffix positions into d_B;
 *   4. map_pos2key builds 2-bit-packed sort keys from the first v_len symbols
 *      and thrust::sort_by_key orders the positions by them;
 *   5. the BWT character of each sorted suffix is appended to bwt_file.
 * All temporary host and device buffers are released before returning
 * (the original leaked h_A/h_B/h_B_key and d_B/d_B_key on every call).
 */
void suffix_blocking(uint32_t prefix, const uint8_t *d_sequence,
		uint8_t *h_sequence, uint64_t seq_length, int prefix_len,
		FILE *bwt_file) {
	uint32_t *d_A = NULL; //counting array
	uint32_t *h_A = NULL;
	uint32_t *h_B = NULL; //suffix positions of this block
	uint32_t *d_B = NULL;
	uint32_t *h_B_key = NULL; //sort keys, parallel to h_B
	uint32_t *d_B_key = NULL;
	/* "%u" matches the uint32_t argument; the previous "%llu" was undefined
	   behavior wherever unsigned long long is wider than 32 bits. */
	fprintf(stderr, "block %u processing\n", prefix);
	uint64_t l = seq_length / (THREADS_NUMBER * BLOCKS_NUMBER) + 1;
	h_A = (uint32_t*) malloc(
			sizeof(uint32_t) * (THREADS_NUMBER * BLOCKS_NUMBER));
	if (h_A == NULL) {
		fprintf(stderr, "Out of host memory!\n");
		exit(1); /* was missing: the code went on to dereference h_A */
	}
	CUDA_CHECK_RETURN(
			cudaMalloc((void**) &d_A, sizeof(uint32_t) * (THREADS_NUMBER*BLOCKS_NUMBER)));
	count_suffixs<<<BLOCKS_NUMBER, THREADS_NUMBER>>>(d_A, d_sequence, l, prefix_len, seq_length, prefix);
	/* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	   drop-in replacement.  Wait for the GPU launched work to complete. */
	CUDA_CHECK_RETURN(cudaDeviceSynchronize());
	CUDA_CHECK_RETURN(
			cudaMemcpy(h_A, d_A, sizeof(uint32_t) * (THREADS_NUMBER * BLOCKS_NUMBER),
					cudaMemcpyDeviceToHost));
	uint32_t last = h_A[THREADS_NUMBER * BLOCKS_NUMBER - 1];
	//exclusive prefix sum: h_A[i] becomes the write offset of thread i
	thrust::exclusive_scan(h_A, h_A + THREADS_NUMBER * BLOCKS_NUMBER, h_A);
	//total number of suffixes falling into this block
	uint32_t B_len = h_A[THREADS_NUMBER * BLOCKS_NUMBER - 1] + last;
	if (B_len == 0) { /* no suffix starts with this prefix: nothing to emit */
		CUDA_CHECK_RETURN(cudaFree(d_A));
		free(h_A);
		return;
	}
	h_B = (uint32_t*) malloc(sizeof(uint32_t) * B_len);
	if (h_B == NULL) {
		fprintf(stderr, "Out of host memory!\n");
		exit(1);
	}
	CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_B, sizeof(uint32_t) * B_len));
	CUDA_CHECK_RETURN(
			cudaMemcpy(d_A, h_A, sizeof(uint32_t) * (THREADS_NUMBER * BLOCKS_NUMBER),
					cudaMemcpyHostToDevice));
	get_suffix_block<<<BLOCKS_NUMBER, THREADS_NUMBER>>>(d_A, d_sequence, l, prefix_len, seq_length, prefix, d_B);
	CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
	CUDA_CHECK_RETURN(
			cudaMemcpy(h_B, d_B, sizeof(uint32_t) * B_len, cudaMemcpyDeviceToHost));
	CUDA_CHECK_RETURN(cudaFree(d_A));
	//Map each suffix position to a fixed-width sort key
	int thread_num = 1024;
	int block_num = B_len / thread_num + 1;
	h_B_key = (uint32_t*) malloc(sizeof(uint32_t) * B_len);
	if (h_B_key == NULL) {
		fprintf(stderr, "Out of host memory!\n");
		exit(1);
	}
	CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_B_key, sizeof(uint32_t) * B_len));
	int v_len = 16 + prefix_len; //assume we can use first v_len characters to distinguish suffixes
	map_pos2key<<<block_num, thread_num>>>(d_sequence, seq_length, prefix_len, v_len, d_B, B_len, d_B_key);
	/* default-stream memcpy is ordered after the kernel, so no explicit sync */
	CUDA_CHECK_RETURN(
			cudaMemcpy(h_B_key, d_B_key, sizeof(uint32_t) * B_len,
					cudaMemcpyDeviceToHost));
	//Radix sort (host-side thrust) of positions by key
	thrust::sort_by_key(h_B_key, h_B_key + B_len, h_B);
	//Construct the BWT
	//FIXME Now h_B is almost the suffix array, possibly with some minor errors, since we need a refinement
	uint8_t bwt_value;
	fseek(bwt_file, 0, SEEK_END); /* hoisted out of the loop: fprintf appends sequentially */
	for (uint32_t i = 0; i < B_len; i++) {
		if (h_B[i] == 0)
			bwt_value = h_sequence[seq_length - 1]; //wrap: character preceding suffix 0
		else
			bwt_value = h_sequence[h_B[i] - 1];
		fprintf(bwt_file, "%d", bwt_value);
	}
	//Parallel checking algorithm
	/* release everything allocated in this call */
	CUDA_CHECK_RETURN(cudaFree(d_B));
	CUDA_CHECK_RETURN(cudaFree(d_B_key));
	free(h_A);
	free(h_B);
	free(h_B_key);
}
/*
 * Entry point of the GPU BWT construction: loads the FASTA text, copies it
 * to device global memory, then processes the 4^prefix_len prefix blocks in
 * order, appending each block's BWT column to "<indexFile>.bwt".
 * Returns 0 on success, 1 on I/O or allocation failure.
 */
int bwt(char *indexFile, uint32_t prefix_len) {
	uint8_t *d_sequence = NULL;
	Sequence sequence;
	fasta_parser(indexFile, sequence);
	FILE *bwt_file;
	char *bwt_fn;
	bwt_fn = (char*) calloc(strlen(indexFile) + 10, 1);
	if (bwt_fn == NULL) {
		fprintf(stderr, "Out of host memory!\n");
		return 1;
	}
	strcpy(bwt_fn, indexFile);
	strcat(bwt_fn, ".bwt");
	bwt_file = fopen(bwt_fn, "wb");
	if (bwt_file == NULL) { /* was unchecked: fprintf(NULL, ...) crashes later */
		fprintf(stderr, "Cannot open %s for writing\n", bwt_fn);
		free(bwt_fn);
		return 1;
	}
	CUDA_CHECK_RETURN(
			cudaMalloc((void** ) &d_sequence,
					sizeof(uint8_t) * sequence.length));
	CUDA_CHECK_RETURN(
			cudaMemcpy(d_sequence, sequence.seq,
					sizeof(uint8_t) * sequence.length, cudaMemcpyHostToDevice)); //copy text to global memory of GPU
	//There will be 4^prefix_len number of blocks
	uint32_t number_blocks = pow(4.0, (int) prefix_len);
	for (uint32_t p = 0; p < number_blocks; p++) {
		suffix_blocking(p, d_sequence, sequence.seq, sequence.length,
				prefix_len, bwt_file);
	}
	/* cleanup: the original leaked all of these and never flushed the file,
	   so buffered BWT output could be lost on exit */
	CUDA_CHECK_RETURN(cudaFree(d_sequence));
	fclose(bwt_file);
	free(bwt_fn);
	free(sequence.seq);
	return 0;
}
|
60f26dcd4cc278dd02d42f73cabf6c2d1991ee9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
/* One BFS expansion step: each thread owns one vertex.  Vertices in the
   current frontier (g_graph_mask set) relax their out-edges — unvisited
   neighbours receive cost+1 and are flagged in g_updating_graph_mask as
   candidates for the next frontier. */
__global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes)
{
	int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
	if( tid >= no_of_nodes || !g_graph_mask[tid] )
		return;                        /* out of range or not in the frontier */
	g_graph_mask[tid] = false;         /* consume this frontier entry */
	const int first = g_graph_nodes[tid].starting;
	const int last  = first + g_graph_nodes[tid].no_of_edges;
	for(int e = first; e < last; e++)
	{
		int neighbour = g_graph_edges[e];
		if(!g_graph_visited[neighbour])
		{
			g_cost[neighbour] = g_cost[tid] + 1;
			g_updating_graph_mask[neighbour] = true;
		}
	}
}
#endif
| 60f26dcd4cc278dd02d42f73cabf6c2d1991ee9a.cu | /*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
/* One BFS expansion step: each thread owns one vertex.  Vertices in the
   current frontier (g_graph_mask set) relax their out-edges — unvisited
   neighbours receive cost+1 and are flagged in g_updating_graph_mask as
   candidates for the next frontier. */
__global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes)
{
	int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
	if( tid >= no_of_nodes || !g_graph_mask[tid] )
		return;                        /* out of range or not in the frontier */
	g_graph_mask[tid] = false;         /* consume this frontier entry */
	const int first = g_graph_nodes[tid].starting;
	const int last  = first + g_graph_nodes[tid].no_of_edges;
	for(int e = first; e < last; e++)
	{
		int neighbour = g_graph_edges[e];
		if(!g_graph_visited[neighbour])
		{
			g_cost[neighbour] = g_cost[tid] + 1;
			g_updating_graph_mask[neighbour] = true;
		}
	}
}
#endif
|
11344c31ebbea84a718c6b4c903ccd90bb5fdf69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The MIT License
*
* Copyright (c) 1997-2015 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <sci_defs/cuda_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
//______________________________________________________________________
//
// @brief A kernel that applies the stencil used in timeAdvance(...)
// @param domainLower a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest non-ghost layer cell of the domain as (x,y,z)
// @param domainSize a three component vector that gives the size of the domain including ghost nodes
// @param ghostLayers the number of layers of ghost cells
// @param phi pointer to the source phi allocated on the device
// @param newphi pointer to the sink phi allocated on the device
// @param residual the residual calculated by this individual kernel
/* Jacobi-style sweep: each (i,j) thread streams along z and writes the
   6-point neighbour average of phi into newphi.
   NOTE(review): `residual` and `numGhostCells` are currently unused (see
   the TODO below); the lower bound is hard-coded to i>0 / j>0 rather than
   using domainLow.x/y — presumably a single ghost layer is assumed; confirm. */
__global__ void poissonGPU1Kernel(uint3 domainLow,
                                  uint3 domainHigh,
                                  uint3 domainSize,
                                  int numGhostCells,
                                  double* phi,
                                  double* newphi,
                                  double* residual)
{
  // calculate the thread indices
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y;
  // Get the size of the data block in which the variables reside.
  //  This is essentially the stride in the index calculations.
  int dx = domainSize.x;
  int dy = domainSize.y;
  // If the threads are within the bounds of the ghost layers
  //  the algorithm is allowed to stream along the z direction
  //  applying the stencil to a line of cells.  The z direction
  //  is streamed because it allows access of x and y elements
  //  that are close to one another which should allow coalesced
  //  memory accesses.
  if(i > 0 && j > 0 && i < domainHigh.x && j < domainHigh.y) {
    for (int k = domainLow.z; k < domainHigh.z; k++) {
      // For an array of [ A ][ B ][ C ], we can index it thus:
      // (a * B * C) + (b * C) + (c * 1)
      int idx = INDEX3D(dx,dy,i,j,k);
      newphi[idx] = (1. / 6)
                  * (phi[INDEX3D(dx,dy, (i-1), j, k)]
                   + phi[INDEX3D(dx,dy, (i+1), j, k)]
                   + phi[INDEX3D(dx,dy, i, (j-1), k)]
                   + phi[INDEX3D(dx,dy, i, (j+1), k)]
                   + phi[INDEX3D(dx,dy, i, j, (k-1))]
                   + phi[INDEX3D(dx,dy, i, j, (k+1))]);
      // TODO Finish residual calculation using atomics
    }
  }
}
/* Thin host wrapper: launches poissonGPU1Kernel on the default stream with
   the caller-supplied grid/block configuration.
   NOTE(review): no hipGetLastError() after the launch — failures surface
   only at a later synchronizing call. */
void launchPoisson1Kernel(dim3 dimGrid,
                          dim3 dimBlock,
                          uint3 domainLow,
                          uint3 domainHigh,
                          uint3 domainSize,
                          int numGhostCells,
                          double* d_phi,
                          double* d_newphi,
                          double* residual)
{
  hipLaunchKernelGGL(( poissonGPU1Kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, domainLow,
                                             domainHigh,
                                             domainSize,
                                             numGhostCells,
                                             d_phi,
                                             d_newphi,
                                             residual);
}
#ifdef __cplusplus
}
#endif
| 11344c31ebbea84a718c6b4c903ccd90bb5fdf69.cu | /*
* The MIT License
*
* Copyright (c) 1997-2015 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <sci_defs/cuda_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
//______________________________________________________________________
//
// @brief A kernel that applies the stencil used in timeAdvance(...)
// @param domainLower a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest non-ghost layer cell of the domain as (x,y,z)
// @param domainSize a three component vector that gives the size of the domain including ghost nodes
// @param ghostLayers the number of layers of ghost cells
// @param phi pointer to the source phi allocated on the device
// @param newphi pointer to the sink phi allocated on the device
// @param residual the residual calculated by this individual kernel
/* Jacobi-style sweep: each (i,j) thread streams along z and writes the
   6-point neighbour average of phi into newphi.
   NOTE(review): `residual` and `numGhostCells` are currently unused (see
   the TODO below); the lower bound is hard-coded to i>0 / j>0 rather than
   using domainLow.x/y — presumably a single ghost layer is assumed; confirm. */
__global__ void poissonGPU1Kernel(uint3 domainLow,
                                  uint3 domainHigh,
                                  uint3 domainSize,
                                  int numGhostCells,
                                  double* phi,
                                  double* newphi,
                                  double* residual)
{
  // calculate the thread indices
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y;
  // Get the size of the data block in which the variables reside.
  //  This is essentially the stride in the index calculations.
  int dx = domainSize.x;
  int dy = domainSize.y;
  // If the threads are within the bounds of the ghost layers
  //  the algorithm is allowed to stream along the z direction
  //  applying the stencil to a line of cells.  The z direction
  //  is streamed because it allows access of x and y elements
  //  that are close to one another which should allow coalesced
  //  memory accesses.
  if(i > 0 && j > 0 && i < domainHigh.x && j < domainHigh.y) {
    for (int k = domainLow.z; k < domainHigh.z; k++) {
      // For an array of [ A ][ B ][ C ], we can index it thus:
      // (a * B * C) + (b * C) + (c * 1)
      int idx = INDEX3D(dx,dy,i,j,k);
      newphi[idx] = (1. / 6)
                  * (phi[INDEX3D(dx,dy, (i-1), j, k)]
                   + phi[INDEX3D(dx,dy, (i+1), j, k)]
                   + phi[INDEX3D(dx,dy, i, (j-1), k)]
                   + phi[INDEX3D(dx,dy, i, (j+1), k)]
                   + phi[INDEX3D(dx,dy, i, j, (k-1))]
                   + phi[INDEX3D(dx,dy, i, j, (k+1))]);
      // TODO Finish residual calculation using atomics
    }
  }
}
/* Thin host wrapper: launches poissonGPU1Kernel on the default stream with
   the caller-supplied grid/block configuration.
   NOTE(review): no cudaGetLastError() after the launch — failures surface
   only at a later synchronizing call. */
void launchPoisson1Kernel(dim3 dimGrid,
                          dim3 dimBlock,
                          uint3 domainLow,
                          uint3 domainHigh,
                          uint3 domainSize,
                          int numGhostCells,
                          double* d_phi,
                          double* d_newphi,
                          double* residual)
{
  poissonGPU1Kernel<<< dimGrid, dimBlock >>>(domainLow,
                                             domainHigh,
                                             domainSize,
                                             numGhostCells,
                                             d_phi,
                                             d_newphi,
                                             residual);
}
#ifdef __cplusplus
}
#endif
|
fdbb52d989cb6b3925e9b3aca406ab57c9dbd9ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2017 Jiawei Chiu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "vec.h"
namespace gi {
/* Grid-stride kernel: sets every element of x[0..n) to `value`. */
__global__
void SVecFillKernel(float* x, const int n, const float value) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    x[i] = value;
  }
}
/* Fills the whole vector with `value` on the device. */
void SVec::DeviceFill(float value) {
  int num_blocks;
  int num_threads;
  /* BlocksThreads(256, 256, ...) picks the launch configuration for size()
     elements — see its definition for the exact policy. */
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  hipLaunchKernelGGL(( SVecFillKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data(), size(), value);
}
/* Grid-stride kernel: x[i] = alpha*a[i] + beta*b[i] + gamma*c[i]. */
__global__
void SVecSetToSum3Kernel(float* x, const int n,
                         const float alpha, const float* __restrict__ a,
                         const float beta, const float* __restrict__ b,
                         const float gamma, const float* __restrict__ c) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    x[i] = alpha * a[i] + beta * b[i] + gamma * c[i];
  }
}
/* Sets this vector to the linear combination alpha*a + beta*b + gamma*c. */
void SVec::DeviceSetToSum3(float alpha, const SVec& a, float beta,
                           const SVec& b, float gamma, const SVec& c) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  hipLaunchKernelGGL(( SVecSetToSum3Kernel), dim3(num_blocks), dim3(num_threads), 0, 0,
      data(), size(), alpha, a.data(), beta, b.data(), gamma, c.data());
}
/* Grid-stride gather kernel: output[i] = input[perm[i]]. */
__global__
void SetToPermuteKernel(const float* __restrict__ input,
                        const int* __restrict__ perm,
                        float* output, const int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    output[i] = input[perm[i]];
  }
}
/* Gathers src through the index vector perm into this vector:
   this[i] = src[perm[i]]. */
void SVec::DeviceSetToPermute(const IVec& perm, const SVec& src) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  hipLaunchKernelGGL(( SetToPermuteKernel), dim3(num_blocks), dim3(num_threads), 0, 0,
      src.data(), perm.data(), data(), size());
}
/* Grid-stride kernel: in-place soft-thresholding (shrinkage) — values are
   moved toward zero by `threshold`; anything within [-threshold, threshold]
   maps to 0. */
__global__
void SoftThresholdKernel(float* output, const int n, const float threshold) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    float v = output[i];
    if (v > threshold) {
      v -= threshold;
    } else if (v < -threshold) {
      v += threshold;
    } else {
      v = 0;
    }
    output[i] = v;
  }
}
/* Applies in-place soft-thresholding with the given threshold (assumed
   non-negative, as in the hard-threshold kernel). */
void SVec::DeviceSoftThreshold(float threshold) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  hipLaunchKernelGGL(( SoftThresholdKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data(), size(), threshold);
}
/* Grid-stride kernel: zeroes entries with |v| <= threshold, leaves the rest
   unchanged. */
__global__
void HardThresholdKernel(float* output, const int n, const float threshold) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    float v = output[i];
    // Assume threshold is positive.
    if (v >= -threshold && v <= threshold) {
      output[i] = 0;
    }
  }
}
/* Applies in-place hard-thresholding with the given (assumed non-negative)
   threshold. */
void SVec::DeviceHardThreshold(float threshold) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  hipLaunchKernelGGL(( HardThresholdKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data(), size(), threshold);
}
/* Grid-stride kernel: in-place scale of every element by alpha. */
__global__
void VecMultiplyKernel(float* output, const int n, const float alpha) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    output[i] *= alpha;
  }
}
/* Scales every element of the vector by alpha, in place. */
void SVec::DeviceMultiply(float alpha) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  hipLaunchKernelGGL(( VecMultiplyKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data(), size(), alpha);
}
/* Grid-stride kernel: replaces each element by its reciprocal, in place.
   No guard against zero entries (yields +/-inf), matching the original. */
__global__
void VecInvertKernel(float* output, const int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    // Float literal: the previous `1.0` (a double) promoted every division
    // to double precision, costing throughput in this float kernel.
    output[i] = 1.0f / output[i];
  }
}
/* Replaces every element by its reciprocal, in place (no zero check). */
void SVec::DeviceInvert() {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  hipLaunchKernelGGL(( VecInvertKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data(), size());
}
} // namespace gi | fdbb52d989cb6b3925e9b3aca406ab57c9dbd9ce.cu | /*
Copyright 2017 Jiawei Chiu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "vec.h"
namespace gi {
// Device-wide fill: every element of x[0..n) is overwritten with `value`.
// A grid-stride loop lets any launch configuration cover the whole array.
__global__
void SVecFillKernel(float* x, const int n, const float value) {
  const int stride = blockDim.x * gridDim.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  while (i < n) {
    x[i] = value;
    i += stride;
  }
}
/* Fills the whole vector with `value` on the device. */
void SVec::DeviceFill(float value) {
  int num_blocks;
  int num_threads;
  /* BlocksThreads(256, 256, ...) picks the launch configuration for size()
     elements — see its definition for the exact policy. */
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  SVecFillKernel<<<num_blocks, num_threads>>>(data(), size(), value);
}
/* Grid-stride kernel: x[i] = alpha*a[i] + beta*b[i] + gamma*c[i]. */
__global__
void SVecSetToSum3Kernel(float* x, const int n,
                         const float alpha, const float* __restrict__ a,
                         const float beta, const float* __restrict__ b,
                         const float gamma, const float* __restrict__ c) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    x[i] = alpha * a[i] + beta * b[i] + gamma * c[i];
  }
}
/* Sets this vector to the linear combination alpha*a + beta*b + gamma*c. */
void SVec::DeviceSetToSum3(float alpha, const SVec& a, float beta,
                           const SVec& b, float gamma, const SVec& c) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  SVecSetToSum3Kernel<<<num_blocks, num_threads>>>(
      data(), size(), alpha, a.data(), beta, b.data(), gamma, c.data());
}
/* Grid-stride gather kernel: output[i] = input[perm[i]]. */
__global__
void SetToPermuteKernel(const float* __restrict__ input,
                        const int* __restrict__ perm,
                        float* output, const int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    output[i] = input[perm[i]];
  }
}
/* Gathers src through the index vector perm into this vector:
   this[i] = src[perm[i]]. */
void SVec::DeviceSetToPermute(const IVec& perm, const SVec& src) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  SetToPermuteKernel<<<num_blocks, num_threads>>>(
      src.data(), perm.data(), data(), size());
}
/* Grid-stride kernel: in-place soft-thresholding (shrinkage) — values are
   moved toward zero by `threshold`; anything within [-threshold, threshold]
   maps to 0. */
__global__
void SoftThresholdKernel(float* output, const int n, const float threshold) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    float v = output[i];
    if (v > threshold) {
      v -= threshold;
    } else if (v < -threshold) {
      v += threshold;
    } else {
      v = 0;
    }
    output[i] = v;
  }
}
/* Applies in-place soft-thresholding with the given threshold (assumed
   non-negative, as in the hard-threshold kernel). */
void SVec::DeviceSoftThreshold(float threshold) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  SoftThresholdKernel<<<num_blocks, num_threads>>>(data(), size(), threshold);
}
/* Grid-stride kernel: zeroes entries with |v| <= threshold, leaves the rest
   unchanged. */
__global__
void HardThresholdKernel(float* output, const int n, const float threshold) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    float v = output[i];
    // Assume threshold is positive.
    if (v >= -threshold && v <= threshold) {
      output[i] = 0;
    }
  }
}
/* Applies in-place hard-thresholding with the given (assumed non-negative)
   threshold. */
void SVec::DeviceHardThreshold(float threshold) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  HardThresholdKernel<<<num_blocks, num_threads>>>(data(), size(), threshold);
}
/* Grid-stride kernel: in-place scale of every element by alpha. */
__global__
void VecMultiplyKernel(float* output, const int n, const float alpha) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    output[i] *= alpha;
  }
}
/* Scales every element of the vector by alpha, in place. */
void SVec::DeviceMultiply(float alpha) {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  VecMultiplyKernel<<<num_blocks, num_threads>>>(data(), size(), alpha);
}
/* Grid-stride kernel: replaces each element by its reciprocal, in place.
   No guard against zero entries (yields +/-inf), matching the original. */
__global__
void VecInvertKernel(float* output, const int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    // Float literal: the previous `1.0` (a double) promoted every division
    // to double precision, costing throughput in this float kernel.
    output[i] = 1.0f / output[i];
  }
}
/* Replaces every element by its reciprocal, in place (no zero check). */
void SVec::DeviceInvert() {
  int num_blocks;
  int num_threads;
  BlocksThreads(256, 256, size(), &num_blocks, &num_threads);
  VecInvertKernel<<<num_blocks, num_threads>>>(data(), size());
}
} // namespace gi |
77d84173858b32ef67bb192f57455c08958a095b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "external/petscvector/petscvector.cuh"
#include "external/petscvector/common/fem.h"
namespace pascinference {
namespace common {
/* cuda kernels cannot be a member of class */
__global__ void kernel_fem_reduce_data(double *data1, double *data2, int T1, int T2, int T2local, double diff);
__global__ void kernel_fem_prolongate_data(double *data1, double *data2, int T1, int T2, int T2local, double diff);
/* Queries the occupancy API for the block size maximizing occupancy of the
   two FEM kernels, storing results in (minGridSize_*, blockSize_*).
   NOTE(review): the launches below use gridSize_reduce/gridSize_prolongate,
   which are not set here — confirm they are derived elsewhere. */
void Fem<PetscVector>::ExternalContent::cuda_occupancy(){
	LOG_FUNC_BEGIN

	/* compute optimal kernel calls */
	gpuErrchk( hipOccupancyMaxPotentialBlockSize( &minGridSize_reduce, &blockSize_reduce, kernel_fem_reduce_data, 0, 0) );
	gpuErrchk( hipOccupancyMaxPotentialBlockSize( &minGridSize_prolongate, &blockSize_prolongate, kernel_fem_prolongate_data, 0, 0) );

	LOG_FUNC_END
}
/* Launches the window-sum reduction kernel (fine data1 -> coarse data2,
   T2local locally owned coarse entries) and blocks until it completes. */
void Fem<PetscVector>::ExternalContent::cuda_reduce_data(double *data1_arr, double *data2_arr, int T1, int T2, int T2local, double diff){
	LOG_FUNC_BEGIN

	hipLaunchKernelGGL(( kernel_fem_reduce_data), dim3(gridSize_reduce), dim3(blockSize_reduce), 0, 0, data1_arr, data2_arr, T1, T2, T2local, diff);
	gpuErrchk( hipDeviceSynchronize() );

	LOG_FUNC_END
}
/* Launches the broadcast (prolongation) kernel — each coarse value in data2
   is copied to its window of fine entries in data1 — and blocks until done. */
void Fem<PetscVector>::ExternalContent::cuda_prolongate_data(double *data1_arr, double *data2_arr, int T1, int T2, int T2local, double diff){
	LOG_FUNC_BEGIN

	hipLaunchKernelGGL(( kernel_fem_prolongate_data), dim3(gridSize_prolongate), dim3(blockSize_prolongate), 0, 0, data1_arr, data2_arr, T1, T2, T2local, diff);
	gpuErrchk( hipDeviceSynchronize() );

	LOG_FUNC_END
}
/* Coarsening kernel: entry t2 of data2 is the sum of data1 over the index
   window [round(t2*diff), round((t2+1)*diff)).  One thread per coarse
   entry; threads with t2 >= T2local exit.
   NOTE(review): parameters T1 and T2 are unused in the body. */
__global__ void kernel_fem_reduce_data(double *data1, double *data2, int T1, int T2, int T2local, double diff) {
	int t2 = blockIdx.x*blockDim.x + threadIdx.x;

	if(t2 < T2local){
		double mysum = 0.0;
		for(int i=round(t2*diff); i < round((t2+1)*diff);i++){
			mysum += data1[i];
		}
		data2[t2] = mysum;
	}
}
/* Inverse of kernel_fem_reduce_data: broadcasts each coarse value data2[t2]
   to every fine entry of data1 in the window
   [round(t2*diff), round((t2+1)*diff)).  One thread per coarse entry.
   NOTE(review): parameters T1 and T2 are unused in the body. */
__global__ void kernel_fem_prolongate_data(double *data1, double *data2, int T1, int T2, int T2local, double diff) {
	int t2 = blockIdx.x*blockDim.x + threadIdx.x;

	if(t2 < T2local){
		for(int i=round(t2*diff); i < round((t2+1)*diff);i++){
			data1[i] = data2[t2];
		}
	}
}
}
} /* end of namespace */
| 77d84173858b32ef67bb192f57455c08958a095b.cu | #include "external/petscvector/petscvector.cuh"
#include "external/petscvector/common/fem.h"
namespace pascinference {
namespace common {
/* cuda kernels cannot be a member of class */
__global__ void kernel_fem_reduce_data(double *data1, double *data2, int T1, int T2, int T2local, double diff);
__global__ void kernel_fem_prolongate_data(double *data1, double *data2, int T1, int T2, int T2local, double diff);
/* Queries the occupancy API for the block size maximizing occupancy of the
   two FEM kernels, storing results in (minGridSize_*, blockSize_*).
   NOTE(review): the launches below use gridSize_reduce/gridSize_prolongate,
   which are not set here — confirm they are derived elsewhere. */
void Fem<PetscVector>::ExternalContent::cuda_occupancy(){
	LOG_FUNC_BEGIN

	/* compute optimal kernel calls */
	gpuErrchk( cudaOccupancyMaxPotentialBlockSize( &minGridSize_reduce, &blockSize_reduce, kernel_fem_reduce_data, 0, 0) );
	gpuErrchk( cudaOccupancyMaxPotentialBlockSize( &minGridSize_prolongate, &blockSize_prolongate, kernel_fem_prolongate_data, 0, 0) );

	LOG_FUNC_END
}
/* Launches the window-sum reduction kernel (fine data1 -> coarse data2,
   T2local locally owned coarse entries) and blocks until it completes. */
void Fem<PetscVector>::ExternalContent::cuda_reduce_data(double *data1_arr, double *data2_arr, int T1, int T2, int T2local, double diff){
	LOG_FUNC_BEGIN

	kernel_fem_reduce_data<<<gridSize_reduce, blockSize_reduce>>>(data1_arr, data2_arr, T1, T2, T2local, diff);
	gpuErrchk( cudaDeviceSynchronize() );

	LOG_FUNC_END
}
/* Launches the broadcast (prolongation) kernel — each coarse value in data2
   is copied to its window of fine entries in data1 — and blocks until done. */
void Fem<PetscVector>::ExternalContent::cuda_prolongate_data(double *data1_arr, double *data2_arr, int T1, int T2, int T2local, double diff){
	LOG_FUNC_BEGIN

	kernel_fem_prolongate_data<<<gridSize_prolongate, blockSize_prolongate>>>(data1_arr, data2_arr, T1, T2, T2local, diff);
	gpuErrchk( cudaDeviceSynchronize() );

	LOG_FUNC_END
}
/* Coarsening kernel: entry t2 of data2 is the sum of data1 over the index
   window [round(t2*diff), round((t2+1)*diff)).  One thread per coarse
   entry; threads with t2 >= T2local exit.
   NOTE(review): parameters T1 and T2 are unused in the body. */
__global__ void kernel_fem_reduce_data(double *data1, double *data2, int T1, int T2, int T2local, double diff) {
	int t2 = blockIdx.x*blockDim.x + threadIdx.x;

	if(t2 < T2local){
		double mysum = 0.0;
		for(int i=round(t2*diff); i < round((t2+1)*diff);i++){
			mysum += data1[i];
		}
		data2[t2] = mysum;
	}
}
__global__ void kernel_fem_prolongate_data(double *data1, double *data2, int T1, int T2, int T2local, double diff) {
int t2 = blockIdx.x*blockDim.x + threadIdx.x;
if(t2 < T2local){
for(int i=round(t2*diff); i < round((t2+1)*diff);i++){
data1[i] = data2[t2];
}
}
}
}
} /* end of namespace */
|
995e71e5ac88b1c31ba3d4a60c9c9ebe7318b9c6.hip | // !!! This is a file automatically generated by hipify!!!
// *****************************************************************************
// Filename: gpu_storage.cc
// Date: 2012-12-25 10:01
// Author: Guangda Lai
// Email: lambda2fei@gmail.com
// Description: TODO(laigd): Put the file description here.
// *****************************************************************************
#include "gpu_storage.h"
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "config.h"
#include "constants.h"
#include "device_graph_data.h"
#include "device_graph_data_types.h"
#include "device_hash_functions.h"
#include "device_util.h"
#include "edge_content_manager.h"
#include "gpu_status.h"
#include "gpu_storage.h"
#include "user_api.h"
#include "vertex_content_manager.h"
#ifdef LAMBDA_DEBUG
#include "debug.h"
#define LAMBDA_HEADER "---> [GPUStorageManager]: "
#endif
/********************* Helper data structure and functions ********************/
// Comparator for thrust::sort over vertex indices: orders two indices by the
// vertex id stored in the device-global vertex content d_vcon (ascending).
struct SortVconById_LT {
__device__ bool operator()(const unsigned int idx1, const unsigned int idx2) {
return d_vcon.d_id[idx1] < d_vcon.d_id[idx2];
}
};
// Comparator for thrust::sort over vertex indices: orders by descending
// in-edge count, breaking ties by descending out-edge count. Reads the
// device-global vertex content d_vcon.
struct SortVconByInEdgeCount_LT {
__device__ bool operator()(const unsigned int idx1, const unsigned int idx2) {
const unsigned int in1 = d_vcon.d_in_edge_count[idx1];
const unsigned int in2 = d_vcon.d_in_edge_count[idx2];
if (in1 != in2) {
return in1 > in2;
}
return d_vcon.d_out_edge_count[idx1] > d_vcon.d_out_edge_count[idx2];
}
};
// Inverse-permutation kernel: for each i, writes dst[src[i]] = i, turning a
// shuffle index into a lookup table (e.g. vertex id -> final position).
// Grid-stride loop: correct for any launch configuration.
// Precondition: src must be a permutation of [0, size).
__global__ void K_SelfGather(
const unsigned int *src,
const unsigned int size,
unsigned int *dst) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(size);
for (unsigned int i = tid; i < size; i += num_threads) {
dst[src[i]] = i;
}
}
// Reversal kernel: dst becomes src in reverse order (dst[i] = src[size-1-i]).
// src and dst must not alias.
__global__ void K_Invert(
const unsigned int *src,
const unsigned int size,
unsigned int *dst) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(size);
for (unsigned int i = tid; i < size; i += num_threads) {
dst[i] = src[size - 1 - i];
}
}
// @value_to_find contains @num_value vertex IDs, and @vid_index[i] is the final
// position of vertex who owns id i. What we are going to do in this function is
// let out_index[i] to be the position of vertex who owns id value_to_find[i].
// Gather kernel: out_index[i] = vid_index[value_to_find[i]], i.e. maps each
// vertex id in value_to_find to that vertex's final (sorted) position.
// Precondition: every value_to_find[i] is a valid index into vid_index.
__global__ void K_SingleGPUFindSortedVidIndex(
const unsigned int *vid_index,
const unsigned int *value_to_find,
const unsigned int num_value,
unsigned int *out_index) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(num_value);
for (unsigned int i = tid; i < num_value; i += num_threads) {
out_index[i] = vid_index[value_to_find[i]];
}
}
// @array_of_blocks contains several blocks of data. Each block contains all
// same elements, the first block are all 0s, the second all 1s and so on, and
// if block A is presented earlier than block B in the array, then the size of
// block A is bigger than or equal to the size of block B.
//
// @prefix_sum_of_sorted_block_size contains the prefix sum of the sorted (from
// small to large) block size of each block in array_of_blocks.
// Computes, for every element of a block-partitioned array, its position in a
// "transposed" (column-major across blocks) layout. See the header comment
// above for the layout invariants on array_of_blocks and
// prefix_sum_of_sorted_block_size.
__global__ void K_SingleGPUTranspose(
const unsigned int *array_of_blocks,
const unsigned int array_size,
const unsigned int *prefix_sum_of_sorted_block_size,
const unsigned int num_blocks,
unsigned int *trans_index) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(array_size);
for (unsigned int i = tid; i < array_size; i += num_threads) {
const unsigned int val = array_of_blocks[i];
// Exponential probe: grow pow until array_of_blocks[i - pow] leaves the
// current block (or we pass the array start)...
unsigned int pow;
for (pow = 1; i >= pow && array_of_blocks[i - pow] == val; pow <<= 1);
pow >>= 1;
// ...then binary-search backwards for the first index of this block.
unsigned int first = i - pow;
while (pow) {
if (first >= pow && array_of_blocks[first - pow] == val) first -= pow;
pow >>= 1;
}
unsigned int row = i - first; // Starts from 0.
// Binary search: find the smallest r whose sorted block size exceeds row,
// i.e. how many of the smallest blocks do NOT have a "row"-th element.
unsigned int l = 0, r = num_blocks;
while (l < r) {
unsigned int mid = (l + r) >> 1;
unsigned int cmp_val =
mid == 0 ? prefix_sum_of_sorted_block_size[0] :
prefix_sum_of_sorted_block_size[mid]
- prefix_sum_of_sorted_block_size[mid - 1];
if (row < cmp_val) {
r = mid;
} else {
l = mid + 1;
}
}
// Elements of rows before `row` in the short blocks, plus the elements of
// row `row` contributed by the (num_blocks - r) blocks long enough.
unsigned int result = (r == 0 ? 0 : prefix_sum_of_sorted_block_size[r - 1]);
result += (num_blocks - r) * row;
// We can add "array_of_blocks[i]" here because we assume that if k < i,
// then the size of block which contains value of array_of_blocks[k] is
// bigger than that of array_of_blocks[i].
trans_index[i] = result + array_of_blocks[i];
}
}
// Builds the linked-list "next" pointers for incoming messages in the
// transposed layout: within each block of equal values, element i points to
// element i+1; the last element of a block gets the sentinel ~0U.
__global__ void K_SingleGPUCalculateInMsgNext(
const unsigned int *array_of_blocks,
const unsigned int *transpose_index,
const unsigned int array_size,
unsigned int *transposed_next) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(array_size);
for (unsigned int i = tid; i < array_size; i += num_threads) {
// i and i+1 belong to the same block iff their values match; link them
// at their transposed positions, otherwise terminate the chain.
if (i != array_size - 1 && array_of_blocks[i] == array_of_blocks[i + 1]) {
transposed_next[transpose_index[i]] = transpose_index[i + 1];
} else {
transposed_next[transpose_index[i]] = ~0U;
}
}
}
// Child kernel (dynamic parallelism, requires SM35+): one thread per
// out-edge. Thread idx advances the iterator by idx and sends a message with
// distance mindist + edge weight along that edge. Caller must launch at least
// edgenum threads in total or trailing edges are never processed.
__global__ void sendmessage(OutEdgeIterator it,int mindist,int edgenum){
int idx = blockDim.x*blockIdx.x+threadIdx.x;
if(idx<edgenum){
it.AddOffset(idx);
// printf("inner sendmessate idx=%d\n",idx);
Message msg(*it);
msg.set_dist(mindist + it->get_weight());
msg.Send();
}
}
/*
__global__ void K_UserCompute() {
const unsigned int num_vertex = d_vcon.d_size;
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(num_vertex);
for (unsigned int idx = tid; idx < num_vertex; idx += num_threads) {
Vertex v(idx);
MessageIterator msgs(idx);
int message = v.Compute(&msgs);
if(message > -1){
int threadNum = v.get_out_edge_count() ;
OutEdgeIterator it = v.GetOutEdgeIterator();
if(threadNum > 100){
sendmessage<<<1,threadNum>>>(it,message);
}else{
for (; !it.Done(); it.Next()) {
Message msg(*it);
msg.set_dist(message + it->get_weight());
msg.Send();
}
}
}
}
}
*/
// Runs the user's Vertex::Compute() on every vertex (grid-stride loop) and
// sends the resulting message along all out-edges. High-degree vertices
// (more than 25 out-edges) offload the edge fan-out to a child kernel via
// dynamic parallelism; low-degree vertices send inline.
__global__ void K_UserCompute() {
const unsigned int num_vertex = d_vcon.d_size;
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(num_vertex);
for (unsigned int idx = tid; idx < num_vertex; idx += num_threads) {
Vertex v(idx);
MessageIterator msgs(idx);
int message = v.Compute(&msgs);
if(message == -1){
// BUGFIX: was `return`, which silently skipped every remaining
// iteration of this thread's grid-stride loop; only this vertex
// should be skipped.
continue;
}else{
int out_edge_Num = v.get_out_edge_count() ;
OutEdgeIterator it = v.GetOutEdgeIterator();
if(out_edge_Num>25 ){
// BUGFIX: the fixed <<<1,64>>> child launch dropped all edges past
// the first 64; launch enough blocks to cover every edge.
unsigned int child_blocks = (out_edge_Num + 63) / 64;
hipLaunchKernelGGL(( sendmessage), dim3(child_blocks),dim3(64), 0, 0, it,message,out_edge_Num);
}else{
for (it = v.GetOutEdgeIterator(); !it.Done(); it.Next()) {
Message msg(*it);
msg.set_dist(message + it->get_weight());
msg.Send();
}
}
}
}
}
// namespace
/******************* GPUStorageManager function definition ********************/
// Default-constructs with no config and no per-GPU message ranges; the real
// setup happens in Init().
GPUStorageManager::GPUStorageManager()
: conf(NULL),
msg_per_gpu_begin_calculated(false),
msg_per_gpu_begin(NULL),
msg_per_gpu_end(NULL) {
}
// Releases the per-GPU message range arrays allocated in Init().
// delete[] on a null pointer is a no-op, so no explicit NULL guards needed.
GPUStorageManager::~GPUStorageManager() {
delete[] msg_per_gpu_begin;
delete[] msg_per_gpu_end;
}
// Stores the config and allocates one [begin,end) message-range slot per GPU
// control thread. Must be called exactly once before use; the arrays are
// freed by the destructor.
void GPUStorageManager::Init(const Config *c) {
conf = c;
msg_per_gpu_begin = new unsigned int[conf->GetNumGPUControlThreads()];
msg_per_gpu_end = new unsigned int[conf->GetNumGPUControlThreads()];
}
// Below are functions dealing with constant memory.
// Reads the device-constant GPU status back to the host (synchronous).
void GPUStorageManager::GetGPUStatusFromGPU(GPUStatus *gpu_status) {
checkCudaErrors(hipMemcpyFromSymbol(
gpu_status, d_gpu_status, sizeof(*gpu_status), 0,
hipMemcpyDeviceToHost));
}
// Writes the GPU status into device-constant memory (synchronous).
void GPUStorageManager::CopyGPUStatusToGPU(const GPUStatus &gpu_status) {
checkCudaErrors(hipMemcpyToSymbol(
d_gpu_status, &gpu_status, sizeof(gpu_status), 0,
hipMemcpyHostToDevice));
}
// The async helpers below enqueue the copy on @stream; the host-side source
// struct must stay alive until the stream has completed the copy.
void GPUStorageManager::CopyGlobalToGPU(
const Global &global,
hipStream_t stream) {
checkCudaErrors(hipMemcpyToSymbolAsync(
d_global, &global, sizeof(global), 0,
hipMemcpyHostToDevice, stream));
}
// Uploads the vertex-content descriptor to constant memory.
void GPUStorageManager::CopyVconToGPU(
const VertexContent &vcon,
hipStream_t stream) {
checkCudaErrors(hipMemcpyToSymbolAsync(
d_vcon, &vcon, sizeof(vcon), 0,
hipMemcpyHostToDevice, stream));
}
// Uploads the edge-content descriptor to constant memory.
void GPUStorageManager::CopyEconToGPU(
const EdgeContent &econ,
hipStream_t stream) {
checkCudaErrors(hipMemcpyToSymbolAsync(
d_econ, &econ, sizeof(econ), 0,
hipMemcpyHostToDevice, stream));
}
// Uploads the outgoing-message-content descriptor to constant memory.
void GPUStorageManager::CopyMconSendToGPU(
const MessageContent &mcon_send,
hipStream_t stream) {
checkCudaErrors(hipMemcpyToSymbolAsync(
d_mcon_send, &mcon_send, sizeof(mcon_send), 0,
hipMemcpyHostToDevice, stream));
}
// Uploads the incoming-message-content descriptor to constant memory.
void GPUStorageManager::CopyMconRecvToGPU(
const MessageContent &mcon_recv,
hipStream_t stream) {
checkCudaErrors(hipMemcpyToSymbolAsync(
d_mcon_recv, &mcon_recv, sizeof(mcon_recv), 0,
hipMemcpyHostToDevice, stream));
}
// Uploads the auxiliary index data (in-message maps) to constant memory.
void GPUStorageManager::CopyAuxiliaryToGPU(
const AuxiliaryDeviceData &auxiliary,
hipStream_t stream) {
checkCudaErrors(hipMemcpyToSymbolAsync(
d_auxiliary, &auxiliary, sizeof(auxiliary), 0,
hipMemcpyHostToDevice, stream));
}
// Below are other public functions.
// Top-level single-GPU index build: sorts vertices, reorders edges to match,
// then derives the auxiliary in-message indexes. When vertices are sorted by
// in-edge count, a temporary id->position table (d_vid_index) is built first
// so edges can be re-ranked against the new vertex order.
void GPUStorageManager::SingleGPUBuildIndexes(
VertexContent *vcon,
EdgeContent *econ,
AuxiliaryDeviceData *auxiliary) {
SingleGPUBuildVconIndexes(vcon);
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
// d_vid_index[i] is the final position of vertex who owns id i.
unsigned int *d_vid_index = NULL;
checkCudaErrors(hipMalloc(&d_vid_index, vcon->d_size * sizeof(unsigned int)));
hipLaunchKernelGGL(( K_SelfGather), dim3(GetNumBlocks(vcon->d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
vcon->d_id, vcon->d_size, d_vid_index);
SingleGPUBuildEconIndexes(d_vid_index, vcon->d_size, econ);
// TODO(laigd): We may allocate @auxiliary here instead of doing so in
// gpu_control_thread_data_types?
SingleGPUBuildAuxiliaryIndexes(d_vid_index, *vcon, *econ, auxiliary);
checkCudaErrors(hipFree(d_vid_index));
#else
SingleGPUBuildEconIndexes(NULL, vcon->d_size, econ);
SingleGPUBuildAuxiliaryIndexes(*vcon, *econ, auxiliary);
#endif
// Initialize the write-side (output) members now that layout is final.
VertexContentManager::InitOutMembers(vcon);
EdgeContentManager::InitOutMembers(econ);
}
// Launches the user Compute kernel over all vertices of @vcon and blocks
// until it finishes.
void GPUStorageManager::UserCompute(VertexContent *vcon) {
// Threads-per-block comes from the (fixed) config, so caching it is fine.
static unsigned int num_threads_per_block = conf->GetNumThreadsPerBlock();
// BUGFIX: num_blocks was `static`, freezing the grid size at whatever
// vcon->d_size happened to be on the *first* call; recompute per call so a
// larger vertex set is fully covered.
unsigned int num_blocks =
(vcon->d_size + num_threads_per_block - 1) / num_threads_per_block;
#ifdef LAMBDA_DEBUG
DBG_WRAP_COUT(
cout << LAMBDA_HEADER << "GPUStorageManager::UserCompute, "
<< "num_threads_per_block: " << num_threads_per_block
<< ", num_blocks: " << num_blocks
<< endl;
);
#endif
hipLaunchKernelGGL(( K_UserCompute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, );
// Catch launch-configuration errors immediately; execution errors surface
// at the synchronize below.
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
}
// Below are private functions.
// Sorts the vertex content (by in-edge count or by id, depending on build
// flags) via an index permutation, then prefix-sums the edge counts so each
// vertex knows its edge ranges. Uses two scratch buffers of d_size words.
void GPUStorageManager::SingleGPUBuildVconIndexes(VertexContent *vcon) {
unsigned int *d_shuffle_index, *d_tmp_buf;
checkCudaErrors(hipMalloc(&d_shuffle_index, vcon->d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_tmp_buf, vcon->d_size * sizeof(unsigned int)));
// Start from the identity permutation, then sort it with a comparator that
// reads the device-global vertex content.
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + vcon->d_size);
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
// Sort the vertex content according to in_edge_count.
thrust::sort(thr_shuffle_index, thr_shuffle_index + vcon->d_size, SortVconByInEdgeCount_LT());
#else
// Sort the vertex content according to its id.
thrust::sort(thr_shuffle_index, thr_shuffle_index + vcon->d_size, SortVconById_LT());
#endif
VertexContentManager::ShuffleInMembers(vcon, thr_shuffle_index, d_tmp_buf);
#if defined(LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT) \
&& defined(LAMBDA_IN_EDGE_COALESCED_MEMORY_ACCESS)
// Do nothing.
#else
// In-place prefix sum turns per-vertex in-edge counts into end offsets.
thrust::device_ptr<unsigned int> thr_in_edge_count(vcon->d_in_edge_count);
thrust::inclusive_scan(thr_in_edge_count, thr_in_edge_count + vcon->d_size, thr_in_edge_count);
#endif
// Do an inclusive scan on out_edge_count so that we can find the starting and
// ending index of out edges of each vertex.
thrust::device_ptr<unsigned int> thr_out_edge_count(vcon->d_out_edge_count);
thrust::inclusive_scan(thr_out_edge_count, thr_out_edge_count + vcon->d_size, thr_out_edge_count);
checkCudaErrors(hipFree(d_shuffle_index));
checkCudaErrors(hipFree(d_tmp_buf));
}
// Reorders the edge content so edges are grouped by the (possibly re-ranked)
// position of their source vertex. @d_vid_index maps vertex id -> sorted
// position; it is only consulted when vertices were sorted by in-edge count
// (otherwise it may be NULL and d_from is used directly as the rank).
// NOTE(review): @num_vertexes is unused here — presumably kept for interface
// symmetry; confirm.
void GPUStorageManager::SingleGPUBuildEconIndexes(
const unsigned int *d_vid_index,
const unsigned int num_vertexes,
EdgeContent *econ) {
unsigned int *d_edge_from_vid_index, *d_shuffle_index, *d_tmp_buf;
checkCudaErrors(hipMalloc(&d_edge_from_vid_index, econ->d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_shuffle_index, econ->d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_tmp_buf, econ->d_size * sizeof(unsigned int)));
thrust::device_ptr<unsigned int> thr_rank(d_edge_from_vid_index);
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
hipLaunchKernelGGL(( K_SingleGPUFindSortedVidIndex), dim3(GetNumBlocks(econ->d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_vid_index,
econ->d_from,
econ->d_size,
d_edge_from_vid_index);
checkCudaErrors(hipDeviceSynchronize());
// Now d_edge_from_vid_index stores the rank of each member of d_from.
#else
thrust::device_ptr<unsigned int> thr_econ_from(econ->d_from);
thrust::copy(thr_econ_from, thr_econ_from + econ->d_size, thr_rank);
#endif
// Sort the edge content according to the rank.
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + econ->d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ->d_size, thr_shuffle_index);
EdgeContentManager::ShuffleInMembers(econ, thr_shuffle_index, d_tmp_buf);
checkCudaErrors(hipFree(d_edge_from_vid_index));
checkCudaErrors(hipFree(d_shuffle_index));
checkCudaErrors(hipFree(d_tmp_buf));
}
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
#ifdef LAMBDA_IN_EDGE_COALESCED_MEMORY_ACCESS
// Builds the auxiliary in-message indexes for the "sorted by in-edge count +
// coalesced in-edge access" configuration: edges are ranked by destination
// vertex position, then "transposed" (column-major across destination blocks)
// so threads reading one message per destination hit contiguous memory.
// Produces: d_in_msg_from (source vertex of each transposed slot),
// d_in_msg_next (per-destination linked list in transposed layout), and
// d_out_edge_in_msg_map (out-edge slot -> in-message slot).
void GPUStorageManager::SingleGPUBuildAuxiliaryIndexes(
const unsigned int *d_vid_index,
const VertexContent &vcon,
const EdgeContent &econ,
AuxiliaryDeviceData *auxiliary) {
#ifdef LAMBDA_DEBUG
unsigned int *buf;
checkCudaErrors(hipHostMalloc(&buf, ::max(vcon.d_size, econ.d_size) * sizeof(unsigned int)));
#endif
unsigned int *d_edge_to_vid_index;
unsigned int *d_in_edge_count_prefix_sum;
unsigned int *d_org_shuffle_index;
unsigned int *d_shuffle_index;
unsigned int *d_transpose_index;
checkCudaErrors(hipMalloc(&d_edge_to_vid_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_in_edge_count_prefix_sum, vcon.d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_org_shuffle_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_shuffle_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_transpose_index, econ.d_size * sizeof(unsigned int)));
// Rank every edge by its destination vertex's sorted position.
hipLaunchKernelGGL(( K_SingleGPUFindSortedVidIndex), dim3(GetNumBlocks(econ.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_vid_index,
econ.d_to,
econ.d_size,
d_edge_to_vid_index);
checkCudaErrors(hipDeviceSynchronize());
// Stable grouping: sort edge indices by destination rank.
thrust::device_ptr<unsigned int> thr_rank(d_edge_to_vid_index);
thrust::device_ptr<unsigned int> thr_org_shuffle_index(d_org_shuffle_index);
thrust::sequence(thr_org_shuffle_index, thr_org_shuffle_index + econ.d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ.d_size, thr_org_shuffle_index);
// Reverse the (descending) in-edge counts, then prefix-sum: this yields the
// ascending-block-size prefix sums required by K_SingleGPUTranspose.
hipLaunchKernelGGL(( K_Invert), dim3(GetNumBlocks(vcon.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
vcon.d_in_edge_count, vcon.d_size, d_in_edge_count_prefix_sum);
thrust::device_ptr<unsigned int> thr_in_edge_count(d_in_edge_count_prefix_sum);
thrust::inclusive_scan(thr_in_edge_count, thr_in_edge_count + vcon.d_size, thr_in_edge_count);
hipLaunchKernelGGL(( K_SingleGPUTranspose), dim3(GetNumBlocks(econ.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_edge_to_vid_index, // Must contain continuous blocks of natural number.
econ.d_size,
d_in_edge_count_prefix_sum,
vcon.d_size,
d_transpose_index);
// Compose the grouping permutation with the transpose to get the final
// out-edge-order -> in-message-slot mapping.
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::device_ptr<unsigned int> thr_transpose_index(d_transpose_index);
thrust::scatter(
thr_org_shuffle_index, thr_org_shuffle_index + econ.d_size,
thr_transpose_index, thr_shuffle_index);
thrust::device_ptr<unsigned int> thr_from(econ.d_from);
thrust::device_ptr<unsigned int> thr_in_msg_from(auxiliary->d_in_msg_from);
thrust::gather(
thr_shuffle_index, thr_shuffle_index + econ.d_size,
thr_from, thr_in_msg_from);
hipLaunchKernelGGL(( K_SingleGPUCalculateInMsgNext), dim3(GetNumBlocks(econ.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_edge_to_vid_index, d_transpose_index, econ.d_size, auxiliary->d_in_msg_next);
hipLaunchKernelGGL(( K_SelfGather), dim3(GetNumBlocks(econ.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_shuffle_index, econ.d_size, auxiliary->d_out_edge_in_msg_map);
#ifdef LAMBDA_DEBUG
DEBUG_OUTPUT(buf, d_vid_index, "vid_index: ", vcon.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_edge_to_vid_index, "edge_to_vid_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_in_edge_count_prefix_sum, "in_edge_count_prefix_sum: ", vcon.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_org_shuffle_index, "org_shuffle_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_shuffle_index, "shuffle_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_transpose_index, "transpose_index: ", econ.d_size, unsigned int);
#endif
checkCudaErrors(hipFree(d_edge_to_vid_index));
checkCudaErrors(hipFree(d_in_edge_count_prefix_sum));
checkCudaErrors(hipFree(d_org_shuffle_index));
checkCudaErrors(hipFree(d_shuffle_index));
checkCudaErrors(hipFree(d_transpose_index));
#ifdef LAMBDA_DEBUG
checkCudaErrors(hipHostFree(buf));
#endif
}
#else // Sorting vertexes by in_edge_count without coalesced memory access.
// Variant without the transpose step: edges are grouped by destination rank,
// and the grouping permutation directly yields d_in_msg_from and
// d_out_edge_in_msg_map (no coalesced in-edge layout, so no d_in_msg_next).
void GPUStorageManager::SingleGPUBuildAuxiliaryIndexes(
const unsigned int *d_vid_index,
const VertexContent &vcon,
const EdgeContent &econ,
AuxiliaryDeviceData *auxiliary) {
unsigned int *d_edge_to_vid_index, *d_shuffle_index;
checkCudaErrors(hipMalloc(&d_edge_to_vid_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_shuffle_index, econ.d_size * sizeof(unsigned int)));
// Rank every edge by its destination vertex's sorted position.
hipLaunchKernelGGL(( K_SingleGPUFindSortedVidIndex), dim3(GetNumBlocks(econ.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_vid_index,
econ.d_to,
econ.d_size,
d_edge_to_vid_index);
checkCudaErrors(hipDeviceSynchronize());
// Group edge indices by destination rank.
thrust::device_ptr<unsigned int> thr_rank(d_edge_to_vid_index);
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + econ.d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ.d_size, thr_shuffle_index);
// Record each in-message slot's source vertex...
thrust::device_ptr<unsigned int> thr_from(econ.d_from);
thrust::device_ptr<unsigned int> thr_in_msg_from(auxiliary->d_in_msg_from);
thrust::gather(
thr_shuffle_index, thr_shuffle_index + econ.d_size,
thr_from, thr_in_msg_from);
// ...and the inverse permutation: out-edge slot -> in-message slot.
hipLaunchKernelGGL(( K_SelfGather), dim3(GetNumBlocks(econ.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_shuffle_index, econ.d_size, auxiliary->d_out_edge_in_msg_map);
checkCudaErrors(hipFree(d_edge_to_vid_index));
checkCudaErrors(hipFree(d_shuffle_index));
}
#endif
#else // Not sorting vertexes by in_edge_count
// Variant for id-sorted vertices: since vertex position equals vertex id,
// the destination id itself (econ.d_to) is the rank — no id->position lookup
// is needed. Otherwise identical to the sorted variant above.
void GPUStorageManager::SingleGPUBuildAuxiliaryIndexes(
const VertexContent &vcon,
const EdgeContent &econ,
AuxiliaryDeviceData *auxiliary) {
#ifdef LAMBDA_DEBUG
unsigned int *buf;
checkCudaErrors(hipHostMalloc(&buf, ::max(vcon.d_size, econ.d_size) * sizeof(unsigned int)));
#endif
unsigned int *d_edge_to_vid_index, *d_shuffle_index;
checkCudaErrors(hipMalloc(&d_edge_to_vid_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_shuffle_index, econ.d_size * sizeof(unsigned int)));
// The destination id doubles as the rank; copy it so the sort can mutate it.
thrust::device_ptr<unsigned int> thr_rank(d_edge_to_vid_index);
thrust::device_ptr<unsigned int> thr_econ_to(econ.d_to);
thrust::copy(thr_econ_to, thr_econ_to + econ.d_size, thr_rank);
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + econ.d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ.d_size, thr_shuffle_index);
thrust::device_ptr<unsigned int> thr_from(econ.d_from);
thrust::device_ptr<unsigned int> thr_in_msg_from(auxiliary->d_in_msg_from);
thrust::gather(
thr_shuffle_index, thr_shuffle_index + econ.d_size,
thr_from, thr_in_msg_from);
hipLaunchKernelGGL(( K_SelfGather), dim3(GetNumBlocks(econ.d_size)), dim3(kDefaultNumThreadsPerBlock), 0, 0,
d_shuffle_index, econ.d_size, auxiliary->d_out_edge_in_msg_map);
#ifdef LAMBDA_DEBUG
DEBUG_OUTPUT(buf, d_edge_to_vid_index, "edge_to_vid_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_shuffle_index, "shuffle_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, auxiliary->d_in_msg_from, "in_msg_from: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, auxiliary->d_out_edge_in_msg_map, "out_edge_in_msg_map: ", econ.d_size, unsigned int);
#endif
checkCudaErrors(hipFree(d_edge_to_vid_index));
checkCudaErrors(hipFree(d_shuffle_index));
#ifdef LAMBDA_DEBUG
checkCudaErrors(hipHostFree(buf));
#endif
}
#endif
#ifdef LAMBDA_DEBUG
#undef LAMBDA_HEADER
#endif
| 995e71e5ac88b1c31ba3d4a60c9c9ebe7318b9c6.cu | // *****************************************************************************
// Filename: gpu_storage.cc
// Date: 2012-12-25 10:01
// Author: Guangda Lai
// Email: lambda2fei@gmail.com
// Description: TODO(laigd): Put the file description here.
// *****************************************************************************
#include "gpu_storage.h"
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "config.h"
#include "constants.h"
#include "device_graph_data.h"
#include "device_graph_data_types.h"
#include "device_hash_functions.h"
#include "device_util.h"
#include "edge_content_manager.h"
#include "gpu_status.h"
#include "gpu_storage.h"
#include "user_api.h"
#include "vertex_content_manager.h"
#ifdef LAMBDA_DEBUG
#include "debug.h"
#define LAMBDA_HEADER "---> [GPUStorageManager]: "
#endif
/********************* Helper data structure and functions ********************/
// Comparator for thrust::sort: orders vertex indices by vertex id (ascending),
// reading the device-global vertex content d_vcon.
struct SortVconById_LT {
__device__ bool operator()(const unsigned int idx1, const unsigned int idx2) {
return d_vcon.d_id[idx1] < d_vcon.d_id[idx2];
}
};
// Comparator for thrust::sort: orders by descending in-edge count, breaking
// ties by descending out-edge count.
struct SortVconByInEdgeCount_LT {
__device__ bool operator()(const unsigned int idx1, const unsigned int idx2) {
return d_vcon.d_in_edge_count[idx1] == d_vcon.d_in_edge_count[idx2]
? d_vcon.d_out_edge_count[idx1] > d_vcon.d_out_edge_count[idx2]
: d_vcon.d_in_edge_count[idx1] > d_vcon.d_in_edge_count[idx2];
}
};
// Inverse-permutation kernel: dst[src[i]] = i. Precondition: src is a
// permutation of [0, size). Grid-stride loop.
__global__ void K_SelfGather(
const unsigned int *src,
const unsigned int size,
unsigned int *dst) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(size);
for (unsigned int i = tid; i < size; i += num_threads) {
dst[src[i]] = i;
}
}
// Reversal kernel: dst[i] = src[size - 1 - i]; src and dst must not alias.
__global__ void K_Invert(
const unsigned int *src,
const unsigned int size,
unsigned int *dst) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(size);
for (unsigned int i = tid; i < size; i += num_threads) {
dst[i] = src[size - 1 - i];
}
}
// @value_to_find contains @num_value vertex IDs, and @vid_index[i] is the final
// position of vertex who owns id i. What we are going to do in this function is
// let out_index[i] to be the position of vertex who owns id value_to_find[i].
// Gather kernel: out_index[i] = vid_index[value_to_find[i]], mapping each
// vertex id to its final (sorted) position. Every value_to_find[i] must be a
// valid index into vid_index.
__global__ void K_SingleGPUFindSortedVidIndex(
const unsigned int *vid_index,
const unsigned int *value_to_find,
const unsigned int num_value,
unsigned int *out_index) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(num_value);
for (unsigned int i = tid; i < num_value; i += num_threads) {
out_index[i] = vid_index[value_to_find[i]];
}
}
// @array_of_blocks contains several blocks of data. Each block contains all
// same elements, the first block are all 0s, the second all 1s and so on, and
// if block A is presented earlier than block B in the array, then the size of
// block A is bigger than or equal to the size of block B.
//
// @prefix_sum_of_sorted_block_size contains the prefix sum of the sorted (from
// small to large) block size of each block in array_of_blocks.
// Computes, for every element of a block-partitioned array, its position in a
// "transposed" (column-major across blocks) layout. See the header comment
// above for the layout invariants on array_of_blocks and
// prefix_sum_of_sorted_block_size.
__global__ void K_SingleGPUTranspose(
const unsigned int *array_of_blocks,
const unsigned int array_size,
const unsigned int *prefix_sum_of_sorted_block_size,
const unsigned int num_blocks,
unsigned int *trans_index) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(array_size);
for (unsigned int i = tid; i < array_size; i += num_threads) {
const unsigned int val = array_of_blocks[i];
// Exponential probe backwards until we leave the current block...
unsigned int pow;
for (pow = 1; i >= pow && array_of_blocks[i - pow] == val; pow <<= 1);
pow >>= 1;
// ...then binary-search for the first index of this block.
unsigned int first = i - pow;
while (pow) {
if (first >= pow && array_of_blocks[first - pow] == val) first -= pow;
pow >>= 1;
}
unsigned int row = i - first; // Starts from 0.
// Binary search over sorted block sizes: count blocks too short to have a
// "row"-th element.
unsigned int l = 0, r = num_blocks;
while (l < r) {
unsigned int mid = (l + r) >> 1;
unsigned int cmp_val =
mid == 0 ? prefix_sum_of_sorted_block_size[0] :
prefix_sum_of_sorted_block_size[mid]
- prefix_sum_of_sorted_block_size[mid - 1];
if (row < cmp_val) {
r = mid;
} else {
l = mid + 1;
}
}
unsigned int result = (r == 0 ? 0 : prefix_sum_of_sorted_block_size[r - 1]);
result += (num_blocks - r) * row;
// We can add "array_of_blocks[i]" here because we assume that if k < i,
// then the size of block which contains value of array_of_blocks[k] is
// bigger than that of array_of_blocks[i].
trans_index[i] = result + array_of_blocks[i];
}
}
// Builds linked-list "next" pointers for incoming messages in the transposed
// layout: consecutive elements of the same block are chained; the last element
// of each block gets the sentinel ~0U.
__global__ void K_SingleGPUCalculateInMsgNext(
const unsigned int *array_of_blocks,
const unsigned int *transpose_index,
const unsigned int array_size,
unsigned int *transposed_next) {
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(array_size);
for (unsigned int i = tid; i < array_size; i += num_threads) {
if (i != array_size - 1 && array_of_blocks[i] == array_of_blocks[i + 1]) {
transposed_next[transpose_index[i]] = transpose_index[i + 1];
} else {
transposed_next[transpose_index[i]] = ~0U;
}
}
}
// Child kernel (dynamic parallelism, requires SM35+): one thread per
// out-edge; sends a message with distance mindist + edge weight along edge
// it + idx. The parent must launch at least edgenum threads in total.
__global__ void sendmessage(OutEdgeIterator it,int mindist,int edgenum){
int idx = blockDim.x*blockIdx.x+threadIdx.x;
if(idx<edgenum){
it.AddOffset(idx);
// printf("inner sendmessate idx=%d\n",idx);
Message msg(*it);
msg.set_dist(mindist + it->get_weight());
msg.Send();
}
}
/*
__global__ void K_UserCompute() {
const unsigned int num_vertex = d_vcon.d_size;
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(num_vertex);
for (unsigned int idx = tid; idx < num_vertex; idx += num_threads) {
Vertex v(idx);
MessageIterator msgs(idx);
int message = v.Compute(&msgs);
if(message > -1){
int threadNum = v.get_out_edge_count() ;
OutEdgeIterator it = v.GetOutEdgeIterator();
if(threadNum > 100){
sendmessage<<<1,threadNum>>>(it,message);
}else{
for (; !it.Done(); it.Next()) {
Message msg(*it);
msg.set_dist(message + it->get_weight());
msg.Send();
}
}
}
}
}
*/
// Runs the user's Vertex::Compute() on every vertex (grid-stride loop) and
// sends the resulting message along all out-edges. High-degree vertices
// (more than 25 out-edges) offload the edge fan-out to a child kernel via
// dynamic parallelism; low-degree vertices send inline.
__global__ void K_UserCompute() {
const unsigned int num_vertex = d_vcon.d_size;
GET_NUM_THREAD_TID_AND_RETURN_IF_TID_GE(num_vertex);
for (unsigned int idx = tid; idx < num_vertex; idx += num_threads) {
Vertex v(idx);
MessageIterator msgs(idx);
int message = v.Compute(&msgs);
if(message == -1){
// BUGFIX: was `return`, which silently skipped every remaining
// iteration of this thread's grid-stride loop; only this vertex
// should be skipped.
continue;
}else{
int out_edge_Num = v.get_out_edge_count() ;
OutEdgeIterator it = v.GetOutEdgeIterator();
if(out_edge_Num>25 ){
// BUGFIX: the fixed <<<1,64>>> child launch dropped all edges past
// the first 64; launch enough blocks to cover every edge.
unsigned int child_blocks = (out_edge_Num + 63) / 64;
sendmessage<<<child_blocks,64>>>(it,message,out_edge_Num);
}else{
for (it = v.GetOutEdgeIterator(); !it.Done(); it.Next()) {
Message msg(*it);
msg.set_dist(message + it->get_weight());
msg.Send();
}
}
}
}
}
// namespace
/******************* GPUStorageManager function definition ********************/
// Default-constructs with no config and no per-GPU message ranges; the real
// setup happens in Init().
GPUStorageManager::GPUStorageManager()
: conf(NULL),
msg_per_gpu_begin_calculated(false),
msg_per_gpu_begin(NULL),
msg_per_gpu_end(NULL) {
}
// Releases the per-GPU message range arrays allocated in Init().
GPUStorageManager::~GPUStorageManager() {
if (msg_per_gpu_begin != NULL) delete[] msg_per_gpu_begin;
if (msg_per_gpu_end != NULL) delete[] msg_per_gpu_end;
}
// Stores the config and allocates one [begin,end) message-range slot per GPU
// control thread. Must be called exactly once before use.
void GPUStorageManager::Init(const Config *c) {
conf = c;
msg_per_gpu_begin = new unsigned int[conf->GetNumGPUControlThreads()];
msg_per_gpu_end = new unsigned int[conf->GetNumGPUControlThreads()];
}
// Below are functions dealing with constant memory.
// Reads the device-constant GPU status back to the host (synchronous).
void GPUStorageManager::GetGPUStatusFromGPU(GPUStatus *gpu_status) {
checkCudaErrors(cudaMemcpyFromSymbol(
gpu_status, d_gpu_status, sizeof(*gpu_status), 0,
cudaMemcpyDeviceToHost));
}
// Writes the GPU status into device-constant memory (synchronous).
void GPUStorageManager::CopyGPUStatusToGPU(const GPUStatus &gpu_status) {
checkCudaErrors(cudaMemcpyToSymbol(
d_gpu_status, &gpu_status, sizeof(gpu_status), 0,
cudaMemcpyHostToDevice));
}
// The async helpers below enqueue the copy on @stream; the host-side source
// struct must stay alive until the stream has completed the copy.
void GPUStorageManager::CopyGlobalToGPU(
const Global &global,
cudaStream_t stream) {
checkCudaErrors(cudaMemcpyToSymbolAsync(
d_global, &global, sizeof(global), 0,
cudaMemcpyHostToDevice, stream));
}
// Uploads the vertex-content descriptor to constant memory.
void GPUStorageManager::CopyVconToGPU(
const VertexContent &vcon,
cudaStream_t stream) {
checkCudaErrors(cudaMemcpyToSymbolAsync(
d_vcon, &vcon, sizeof(vcon), 0,
cudaMemcpyHostToDevice, stream));
}
// Uploads the edge-content descriptor to constant memory.
void GPUStorageManager::CopyEconToGPU(
const EdgeContent &econ,
cudaStream_t stream) {
checkCudaErrors(cudaMemcpyToSymbolAsync(
d_econ, &econ, sizeof(econ), 0,
cudaMemcpyHostToDevice, stream));
}
// Uploads the outgoing-message-content descriptor to constant memory.
void GPUStorageManager::CopyMconSendToGPU(
const MessageContent &mcon_send,
cudaStream_t stream) {
checkCudaErrors(cudaMemcpyToSymbolAsync(
d_mcon_send, &mcon_send, sizeof(mcon_send), 0,
cudaMemcpyHostToDevice, stream));
}
// Uploads the incoming-message-content descriptor to constant memory.
void GPUStorageManager::CopyMconRecvToGPU(
const MessageContent &mcon_recv,
cudaStream_t stream) {
checkCudaErrors(cudaMemcpyToSymbolAsync(
d_mcon_recv, &mcon_recv, sizeof(mcon_recv), 0,
cudaMemcpyHostToDevice, stream));
}
// Uploads the auxiliary index data (in-message maps) to constant memory.
void GPUStorageManager::CopyAuxiliaryToGPU(
const AuxiliaryDeviceData &auxiliary,
cudaStream_t stream) {
checkCudaErrors(cudaMemcpyToSymbolAsync(
d_auxiliary, &auxiliary, sizeof(auxiliary), 0,
cudaMemcpyHostToDevice, stream));
}
// Below are other public functions.
// Top-level single-GPU index build: sorts vertices, reorders edges to match,
// then derives the auxiliary in-message indexes. When vertices are sorted by
// in-edge count, a temporary id->position table (d_vid_index) is built first
// so edges can be re-ranked against the new vertex order.
void GPUStorageManager::SingleGPUBuildIndexes(
VertexContent *vcon,
EdgeContent *econ,
AuxiliaryDeviceData *auxiliary) {
SingleGPUBuildVconIndexes(vcon);
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
// d_vid_index[i] is the final position of vertex who owns id i.
unsigned int *d_vid_index = NULL;
checkCudaErrors(cudaMalloc(&d_vid_index, vcon->d_size * sizeof(unsigned int)));
K_SelfGather<<<GetNumBlocks(vcon->d_size), kDefaultNumThreadsPerBlock>>>(
vcon->d_id, vcon->d_size, d_vid_index);
SingleGPUBuildEconIndexes(d_vid_index, vcon->d_size, econ);
// TODO(laigd): We may allocate @auxiliary here instead of doing so in
// gpu_control_thread_data_types?
SingleGPUBuildAuxiliaryIndexes(d_vid_index, *vcon, *econ, auxiliary);
checkCudaErrors(cudaFree(d_vid_index));
#else
SingleGPUBuildEconIndexes(NULL, vcon->d_size, econ);
SingleGPUBuildAuxiliaryIndexes(*vcon, *econ, auxiliary);
#endif
// Initialize the write-side (output) members now that layout is final.
VertexContentManager::InitOutMembers(vcon);
EdgeContentManager::InitOutMembers(econ);
}
// Launches the user Compute kernel over all vertices of @vcon and blocks
// until it finishes.
void GPUStorageManager::UserCompute(VertexContent *vcon) {
// Threads-per-block comes from the (fixed) config, so caching it is fine.
static unsigned int num_threads_per_block = conf->GetNumThreadsPerBlock();
// BUGFIX: num_blocks was `static`, freezing the grid size at whatever
// vcon->d_size happened to be on the *first* call; recompute per call so a
// larger vertex set is fully covered.
unsigned int num_blocks =
(vcon->d_size + num_threads_per_block - 1) / num_threads_per_block;
#ifdef LAMBDA_DEBUG
DBG_WRAP_COUT(
cout << LAMBDA_HEADER << "GPUStorageManager::UserCompute, "
<< "num_threads_per_block: " << num_threads_per_block
<< ", num_blocks: " << num_blocks
<< endl;
);
#endif
K_UserCompute<<<num_blocks, num_threads_per_block>>>();
// Catch launch-configuration errors immediately; execution errors surface
// at the synchronize below.
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
}
// Below are private functions.
// Sorts the vertex content (by in-edge count or by id, depending on build
// flags) via an index permutation, then prefix-sums the edge counts so each
// vertex knows its edge ranges. Uses two scratch buffers of d_size words.
void GPUStorageManager::SingleGPUBuildVconIndexes(VertexContent *vcon) {
unsigned int *d_shuffle_index, *d_tmp_buf;
checkCudaErrors(cudaMalloc(&d_shuffle_index, vcon->d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_tmp_buf, vcon->d_size * sizeof(unsigned int)));
// Start from the identity permutation, then sort it with a comparator that
// reads the device-global vertex content.
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + vcon->d_size);
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
// Sort the vertex content according to in_edge_count.
thrust::sort(thr_shuffle_index, thr_shuffle_index + vcon->d_size, SortVconByInEdgeCount_LT());
#else
// Sort the vertex content according to its id.
thrust::sort(thr_shuffle_index, thr_shuffle_index + vcon->d_size, SortVconById_LT());
#endif
VertexContentManager::ShuffleInMembers(vcon, thr_shuffle_index, d_tmp_buf);
#if defined(LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT) \
&& defined(LAMBDA_IN_EDGE_COALESCED_MEMORY_ACCESS)
// Do nothing.
#else
// In-place prefix sum turns per-vertex in-edge counts into end offsets.
thrust::device_ptr<unsigned int> thr_in_edge_count(vcon->d_in_edge_count);
thrust::inclusive_scan(thr_in_edge_count, thr_in_edge_count + vcon->d_size, thr_in_edge_count);
#endif
// Do an inclusive scan on out_edge_count so that we can find the starting and
// ending index of out edges of each vertex.
thrust::device_ptr<unsigned int> thr_out_edge_count(vcon->d_out_edge_count);
thrust::inclusive_scan(thr_out_edge_count, thr_out_edge_count + vcon->d_size, thr_out_edge_count);
checkCudaErrors(cudaFree(d_shuffle_index));
checkCudaErrors(cudaFree(d_tmp_buf));
}
// Builds the device-side edge-content indexes: computes a sort key ("rank")
// for every edge from its source vertex, then reorders the edge members so
// edges are grouped by that rank.
// NOTE(review): @num_vertexes is currently unused in this function.
void GPUStorageManager::SingleGPUBuildEconIndexes(
const unsigned int *d_vid_index,
const unsigned int num_vertexes,
EdgeContent *econ) {
// d_edge_from_vid_index: per-edge sort key; d_shuffle_index: resulting
// permutation; d_tmp_buf: scratch for the member shuffle.
unsigned int *d_edge_from_vid_index, *d_shuffle_index, *d_tmp_buf;
checkCudaErrors(cudaMalloc(&d_edge_from_vid_index, econ->d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_shuffle_index, econ->d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_tmp_buf, econ->d_size * sizeof(unsigned int)));
thrust::device_ptr<unsigned int> thr_rank(d_edge_from_vid_index);
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
// Vertexes were re-sorted, so look up each source id's position in the
// sorted vertex order.
K_SingleGPUFindSortedVidIndex<<<GetNumBlocks(econ->d_size), kDefaultNumThreadsPerBlock>>>(
d_vid_index,
econ->d_from,
econ->d_size,
d_edge_from_vid_index);
checkCudaErrors(cudaDeviceSynchronize());
// Now d_edge_from_vid_index stores the rank of each member of d_from.
#else
// Vertexes are in id order, so the source id itself serves as the rank.
thrust::device_ptr<unsigned int> thr_econ_from(econ->d_from);
thrust::copy(thr_econ_from, thr_econ_from + econ->d_size, thr_rank);
#endif
// Sort the edge content according to the rank.
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + econ->d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ->d_size, thr_shuffle_index);
// Apply the permutation to the edge content's members.
EdgeContentManager::ShuffleInMembers(econ, thr_shuffle_index, d_tmp_buf);
checkCudaErrors(cudaFree(d_edge_from_vid_index));
checkCudaErrors(cudaFree(d_shuffle_index));
checkCudaErrors(cudaFree(d_tmp_buf));
}
#ifdef LAMBDA_SORT_VERTEX_BY_IN_EDGE_COUNT
#ifdef LAMBDA_IN_EDGE_COALESCED_MEMORY_ACCESS
// Builds the auxiliary in-message indexes (auxiliary->d_in_msg_from,
// d_in_msg_next and d_out_edge_in_msg_map) for the configuration where
// vertexes are sorted by in_edge_count AND the in-edge message layout is
// transposed for coalesced memory access.
void GPUStorageManager::SingleGPUBuildAuxiliaryIndexes(
const unsigned int *d_vid_index,
const VertexContent &vcon,
const EdgeContent &econ,
AuxiliaryDeviceData *auxiliary) {
#ifdef LAMBDA_DEBUG
// Host staging buffer, large enough for either a vertex- or edge-sized dump.
unsigned int *buf;
checkCudaErrors(cudaMallocHost(&buf, std::max(vcon.d_size, econ.d_size) * sizeof(unsigned int)));
#endif
// Temporaries (edge-sized unless noted):
//   d_edge_to_vid_index        -- rank of each edge's destination vertex,
//   d_in_edge_count_prefix_sum -- vertex-sized transpose offsets (filled by
//                                 K_Invert, then inclusive-scanned),
//   d_org_shuffle_index        -- edge permutation sorted by destination rank,
//   d_shuffle_index            -- final (transposed) edge permutation,
//   d_transpose_index          -- target slot of each rank-sorted edge.
unsigned int *d_edge_to_vid_index;
unsigned int *d_in_edge_count_prefix_sum;
unsigned int *d_org_shuffle_index;
unsigned int *d_shuffle_index;
unsigned int *d_transpose_index;
checkCudaErrors(cudaMalloc(&d_edge_to_vid_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_in_edge_count_prefix_sum, vcon.d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_org_shuffle_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_shuffle_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_transpose_index, econ.d_size * sizeof(unsigned int)));
// Rank each edge's destination vertex in the sorted vertex order.
K_SingleGPUFindSortedVidIndex<<<GetNumBlocks(econ.d_size), kDefaultNumThreadsPerBlock>>>(
d_vid_index,
econ.d_to,
econ.d_size,
d_edge_to_vid_index);
checkCudaErrors(cudaDeviceSynchronize());
// Sort edge positions by destination rank; thr_org_shuffle_index becomes
// the permutation from rank-sorted order to the original edge order.
thrust::device_ptr<unsigned int> thr_rank(d_edge_to_vid_index);
thrust::device_ptr<unsigned int> thr_org_shuffle_index(d_org_shuffle_index);
thrust::sequence(thr_org_shuffle_index, thr_org_shuffle_index + econ.d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ.d_size, thr_org_shuffle_index);
// Prepare the per-vertex offsets used by the transpose.
// NOTE(review): K_Invert's exact transform of in_edge_count is defined
// elsewhere -- confirm its semantics before relying on this description.
K_Invert<<<GetNumBlocks(vcon.d_size), kDefaultNumThreadsPerBlock>>>(
vcon.d_in_edge_count, vcon.d_size, d_in_edge_count_prefix_sum);
thrust::device_ptr<unsigned int> thr_in_edge_count(d_in_edge_count_prefix_sum);
thrust::inclusive_scan(thr_in_edge_count, thr_in_edge_count + vcon.d_size, thr_in_edge_count);
// Compute, for each rank-sorted edge, its transposed slot.
K_SingleGPUTranspose<<<GetNumBlocks(econ.d_size), kDefaultNumThreadsPerBlock>>>(
d_edge_to_vid_index, // Must contain continuous blocks of natural number.
econ.d_size,
d_in_edge_count_prefix_sum,
vcon.d_size,
d_transpose_index);
// Scatter the rank-sorted permutation into transposed order:
// d_shuffle_index[d_transpose_index[i]] = d_org_shuffle_index[i].
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::device_ptr<unsigned int> thr_transpose_index(d_transpose_index);
thrust::scatter(
thr_org_shuffle_index, thr_org_shuffle_index + econ.d_size,
thr_transpose_index, thr_shuffle_index);
// d_in_msg_from[k] = econ.d_from[d_shuffle_index[k]]: source vertex of the
// k-th in-message slot.
thrust::device_ptr<unsigned int> thr_from(econ.d_from);
thrust::device_ptr<unsigned int> thr_in_msg_from(auxiliary->d_in_msg_from);
thrust::gather(
thr_shuffle_index, thr_shuffle_index + econ.d_size,
thr_from, thr_in_msg_from);
// Presumably links message slots that share a destination vertex (see
// K_SingleGPUCalculateInMsgNext) -- confirm against the kernel.
K_SingleGPUCalculateInMsgNext<<<GetNumBlocks(econ.d_size), kDefaultNumThreadsPerBlock>>>(
d_edge_to_vid_index, d_transpose_index, econ.d_size, auxiliary->d_in_msg_next);
// Presumably maps each edge position to its in-message slot (see
// K_SelfGather) -- confirm against the kernel.
K_SelfGather<<<GetNumBlocks(econ.d_size), kDefaultNumThreadsPerBlock>>>(
d_shuffle_index, econ.d_size, auxiliary->d_out_edge_in_msg_map);
#ifdef LAMBDA_DEBUG
DEBUG_OUTPUT(buf, d_vid_index, "vid_index: ", vcon.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_edge_to_vid_index, "edge_to_vid_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_in_edge_count_prefix_sum, "in_edge_count_prefix_sum: ", vcon.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_org_shuffle_index, "org_shuffle_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_shuffle_index, "shuffle_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_transpose_index, "transpose_index: ", econ.d_size, unsigned int);
#endif
checkCudaErrors(cudaFree(d_edge_to_vid_index));
checkCudaErrors(cudaFree(d_in_edge_count_prefix_sum));
checkCudaErrors(cudaFree(d_org_shuffle_index));
checkCudaErrors(cudaFree(d_shuffle_index));
checkCudaErrors(cudaFree(d_transpose_index));
#ifdef LAMBDA_DEBUG
checkCudaErrors(cudaFreeHost(buf));
#endif
}
#else // Sorting vertexes by in_edge_count without coalesced memory access.
// Variant for vertexes sorted by in_edge_count WITHOUT the coalesced
// in-edge layout: edges are sorted by the rank of their destination vertex
// and the resulting permutation fills auxiliary->d_in_msg_from and
// auxiliary->d_out_edge_in_msg_map.
// NOTE(review): @vcon is currently unused in this variant.
void GPUStorageManager::SingleGPUBuildAuxiliaryIndexes(
const unsigned int *d_vid_index,
const VertexContent &vcon,
const EdgeContent &econ,
AuxiliaryDeviceData *auxiliary) {
unsigned int *d_edge_to_vid_index, *d_shuffle_index;
checkCudaErrors(cudaMalloc(&d_edge_to_vid_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_shuffle_index, econ.d_size * sizeof(unsigned int)));
// Rank each edge's destination vertex in the sorted vertex order.
K_SingleGPUFindSortedVidIndex<<<GetNumBlocks(econ.d_size), kDefaultNumThreadsPerBlock>>>(
d_vid_index,
econ.d_to,
econ.d_size,
d_edge_to_vid_index);
checkCudaErrors(cudaDeviceSynchronize());
// Sort edge positions by that rank to obtain the in-message ordering.
thrust::device_ptr<unsigned int> thr_rank(d_edge_to_vid_index);
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + econ.d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ.d_size, thr_shuffle_index);
// d_in_msg_from[k] = econ.d_from[d_shuffle_index[k]].
thrust::device_ptr<unsigned int> thr_from(econ.d_from);
thrust::device_ptr<unsigned int> thr_in_msg_from(auxiliary->d_in_msg_from);
thrust::gather(
thr_shuffle_index, thr_shuffle_index + econ.d_size,
thr_from, thr_in_msg_from);
// Presumably maps each edge position to its in-message slot (see
// K_SelfGather) -- confirm against the kernel.
K_SelfGather<<<GetNumBlocks(econ.d_size), kDefaultNumThreadsPerBlock>>>(
d_shuffle_index, econ.d_size, auxiliary->d_out_edge_in_msg_map);
checkCudaErrors(cudaFree(d_edge_to_vid_index));
checkCudaErrors(cudaFree(d_shuffle_index));
}
#endif
#else // Not sorting vertexes by in_edge_count
// Variant for vertexes kept in id order: the destination vertex id itself
// is the sort key, so no rank-lookup kernel is needed.
void GPUStorageManager::SingleGPUBuildAuxiliaryIndexes(
const VertexContent &vcon,
const EdgeContent &econ,
AuxiliaryDeviceData *auxiliary) {
#ifdef LAMBDA_DEBUG
// Host staging buffer for the debug dumps below.
unsigned int *buf;
checkCudaErrors(cudaMallocHost(&buf, std::max(vcon.d_size, econ.d_size) * sizeof(unsigned int)));
#endif
unsigned int *d_edge_to_vid_index, *d_shuffle_index;
checkCudaErrors(cudaMalloc(&d_edge_to_vid_index, econ.d_size * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_shuffle_index, econ.d_size * sizeof(unsigned int)));
// Use each edge's destination id directly as its sort key.
thrust::device_ptr<unsigned int> thr_rank(d_edge_to_vid_index);
thrust::device_ptr<unsigned int> thr_econ_to(econ.d_to);
thrust::copy(thr_econ_to, thr_econ_to + econ.d_size, thr_rank);
// Sort edge positions by destination to obtain the in-message ordering.
thrust::device_ptr<unsigned int> thr_shuffle_index(d_shuffle_index);
thrust::sequence(thr_shuffle_index, thr_shuffle_index + econ.d_size);
thrust::sort_by_key(thr_rank, thr_rank + econ.d_size, thr_shuffle_index);
// d_in_msg_from[k] = econ.d_from[d_shuffle_index[k]].
thrust::device_ptr<unsigned int> thr_from(econ.d_from);
thrust::device_ptr<unsigned int> thr_in_msg_from(auxiliary->d_in_msg_from);
thrust::gather(
thr_shuffle_index, thr_shuffle_index + econ.d_size,
thr_from, thr_in_msg_from);
// Presumably maps each edge position to its in-message slot (see
// K_SelfGather) -- confirm against the kernel.
K_SelfGather<<<GetNumBlocks(econ.d_size), kDefaultNumThreadsPerBlock>>>(
d_shuffle_index, econ.d_size, auxiliary->d_out_edge_in_msg_map);
#ifdef LAMBDA_DEBUG
DEBUG_OUTPUT(buf, d_edge_to_vid_index, "edge_to_vid_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, d_shuffle_index, "shuffle_index: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, auxiliary->d_in_msg_from, "in_msg_from: ", econ.d_size, unsigned int);
DEBUG_OUTPUT(buf, auxiliary->d_out_edge_in_msg_map, "out_edge_in_msg_map: ", econ.d_size, unsigned int);
#endif
checkCudaErrors(cudaFree(d_edge_to_vid_index));
checkCudaErrors(cudaFree(d_shuffle_index));
#ifdef LAMBDA_DEBUG
checkCudaErrors(cudaFreeHost(buf));
#endif
}
#endif
#ifdef LAMBDA_DEBUG
#undef LAMBDA_HEADER
#endif
|
91e7b14f49f2bd48c9812dc63fe1dbee58993d49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "cutil_inline.h"
//
// kernel routine
//
// Demo kernel: every thread stores its own intra-block thread index (as a
// float) at its global position in x. The caller must size x with at least
// gridDim.x * blockDim.x elements; there is no bounds guard.
__global__ void my_first_kernel(float *x)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    x[gid] = static_cast<float>(threadIdx.x);
}
//
// main code
//
// Minimal HIP example: launch my_first_kernel on 2 blocks x 8 threads,
// copy the 16 floats back and print them (each value is the writing
// thread's index within its block, per the kernel above).
int main(int argc, char **argv)
{
float *h_x, *d_x;
int nblocks, nthreads, nsize, n;
// initialise card
cutilDeviceInit(argc, argv);
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 8;
nsize = nblocks*nthreads ;
// allocate memory for array (host heap + device global memory)
h_x = (float *)malloc(nsize*sizeof(float));
cudaSafeCall(hipMalloc((void **)&d_x, nsize*sizeof(float)));
// execute kernel
hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
cudaCheckMsg("my_first_kernel execution failed\n");
// copy back results and print them out
cudaSafeCall( hipMemcpy(h_x,d_x,nsize*sizeof(float),
hipMemcpyDeviceToHost) );
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);
// free memory
cudaSafeCall(hipFree(d_x));
free(h_x);
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
return 0;
}
| 91e7b14f49f2bd48c9812dc63fe1dbee58993d49.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "cutil_inline.h"
//
// kernel routine
//
// Demo kernel: every thread stores its own intra-block thread index (as a
// float) at its global position in x. The caller must size x with at least
// gridDim.x * blockDim.x elements; there is no bounds guard.
__global__ void my_first_kernel(float *x)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    x[gid] = static_cast<float>(threadIdx.x);
}
//
// main code
//
// Minimal CUDA example: launch my_first_kernel on 2 blocks x 8 threads,
// copy the 16 floats back and print them (each value is the writing
// thread's index within its block, per the kernel above).
int main(int argc, char **argv)
{
float *h_x, *d_x;
int nblocks, nthreads, nsize, n;
// initialise card
cutilDeviceInit(argc, argv);
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 8;
nsize = nblocks*nthreads ;
// allocate memory for array (host heap + device global memory)
h_x = (float *)malloc(nsize*sizeof(float));
cudaSafeCall(cudaMalloc((void **)&d_x, nsize*sizeof(float)));
// execute kernel
my_first_kernel<<<nblocks,nthreads>>>(d_x);
cudaCheckMsg("my_first_kernel execution failed\n");
// copy back results and print them out
cudaSafeCall( cudaMemcpy(h_x,d_x,nsize*sizeof(float),
cudaMemcpyDeviceToHost) );
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]);
// free memory
cudaSafeCall(cudaFree(d_x));
free(h_x);
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
return 0;
}
|
8b0c4995d938c5df0d181a2835490b799f8737fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <pthread.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#define COMPRESSION_BATCH_SIZE 32
using namespace std;
struct ThreadArg {
float *original_data;
long num_elements;
int thread_num;
float *compressed_data;
unsigned int *mask;
};
int n_threads = 8;
long layer_sizes[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256,
13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384,
13l * 13 * 256, 6l * 6 * 256};
int num_layers = 8;
// Thread body: compresses one contiguous 1/n_threads slice of the input.
// For each COMPRESSION_BATCH_SIZE-element batch it builds a 32-bit bitmask
// (the batch's first element ends up in the most significant bit) whose set
// bits mark strictly-positive values, and copies those values into
// compressed_data at their original positions.
// NOTE(review): positive values are stored at index j, not compacted, so
// the output stream stays sparse -- presumably intentional for this
// benchmark; confirm with the author.
void *compressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float *compressed_data = thread_arg->compressed_data;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
// First element of this thread's slice; main() guarantees the slice length
// is a whole number of compression batches.
long start = thread_num * num_elements / n_threads;
long n_compression_batches =
num_elements / n_threads / COMPRESSION_BATCH_SIZE;
for (long i = 0; i < n_compression_batches; i++) {
// One mask word per batch.
long mask_pos =
(i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
mask[mask_pos] = 0;
for (long j = i * COMPRESSION_BATCH_SIZE + start;
j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
if (original_data[j] > 0) {
mask[mask_pos] = (mask[mask_pos] << 1) + 1;
compressed_data[j] = original_data[j];
} else {
mask[mask_pos] = (mask[mask_pos] << 1);
}
}
}
return NULL;
}
// Benchmark driver: for each layer size (scaled by batch_size), generates
// ~70% non-zero activation data, then uses GPU events to time the pinned
// allocation of the output buffers plus the multi-threaded CPU compression
// pass, reporting per-layer and total times in milliseconds.
int main() {
int batch_size = 128;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
pthread_t threads[n_threads];
// Scale the per-sample layer sizes by the batch size.
for (int i = 0; i < num_layers; i++) {
layer_sizes[i] *= batch_size;
}
vector<float> compression_times;
float total_milli = 0.0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
float *original_data, *compressed_data;
unsigned int *mask;
hipHostMalloc((void **)&original_data, num_elements * sizeof(float));
// Generate data: roughly 30% zeros and 70% ones.
for (long i = 0; i < num_elements; i++) {
if (rand() % 10 < 3)
original_data[i] = 0;
else
original_data[i] = 1;
}
// Each thread's slice must consist of whole compression batches.
if (num_elements % n_threads != 0) {
cout << "bad number of threads" << endl;
exit(0);
}
if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) {
cout << "bad num_elements or n_threads" << endl;
exit(0);
}
cout << "starting " << j << endl;
// Timed region: pinned-output allocation + threaded compression.
hipEventRecord(start);
hipHostMalloc((void **)&compressed_data, num_elements * sizeof(float));
hipHostMalloc((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE *
sizeof(unsigned int));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &compressThread,
(void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milli;
hipEventElapsedTime(&milli, start, stop);
compression_times.push_back(milli);
total_milli += milli;
// cout << milli << endl;
hipHostFree(original_data);
hipHostFree(compressed_data);
hipHostFree(mask);
}
// Report per-layer timings, then the total.
for (int i = 0; i < num_layers; i++) {
cout << compression_times[i] << endl;
}
cout << total_milli << endl;
} | 8b0c4995d938c5df0d181a2835490b799f8737fb.cu | #include <pthread.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#define COMPRESSION_BATCH_SIZE 32
using namespace std;
struct ThreadArg {
float *original_data;
long num_elements;
int thread_num;
float *compressed_data;
unsigned int *mask;
};
int n_threads = 8;
long layer_sizes[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256,
13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384,
13l * 13 * 256, 6l * 6 * 256};
int num_layers = 8;
// Thread body: compresses one contiguous 1/n_threads slice of the input.
// For each COMPRESSION_BATCH_SIZE-element batch it builds a 32-bit bitmask
// (the batch's first element ends up in the most significant bit) whose set
// bits mark strictly-positive values, and copies those values into
// compressed_data at their original positions.
// NOTE(review): positive values are stored at index j, not compacted, so
// the output stream stays sparse -- presumably intentional for this
// benchmark; confirm with the author.
void *compressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float *compressed_data = thread_arg->compressed_data;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
// First element of this thread's slice; main() guarantees the slice length
// is a whole number of compression batches.
long start = thread_num * num_elements / n_threads;
long n_compression_batches =
num_elements / n_threads / COMPRESSION_BATCH_SIZE;
for (long i = 0; i < n_compression_batches; i++) {
// One mask word per batch.
long mask_pos =
(i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
mask[mask_pos] = 0;
for (long j = i * COMPRESSION_BATCH_SIZE + start;
j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
if (original_data[j] > 0) {
mask[mask_pos] = (mask[mask_pos] << 1) + 1;
compressed_data[j] = original_data[j];
} else {
mask[mask_pos] = (mask[mask_pos] << 1);
}
}
}
return NULL;
}
// Benchmark driver: for each layer size (scaled by batch_size), generates
// ~70% non-zero activation data, then uses GPU events to time the pinned
// allocation of the output buffers plus the multi-threaded CPU compression
// pass, reporting per-layer and total times in milliseconds.
int main() {
int batch_size = 128;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
pthread_t threads[n_threads];
// Scale the per-sample layer sizes by the batch size.
for (int i = 0; i < num_layers; i++) {
layer_sizes[i] *= batch_size;
}
vector<float> compression_times;
float total_milli = 0.0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
float *original_data, *compressed_data;
unsigned int *mask;
cudaMallocHost((void **)&original_data, num_elements * sizeof(float));
// Generate data: roughly 30% zeros and 70% ones.
for (long i = 0; i < num_elements; i++) {
if (rand() % 10 < 3)
original_data[i] = 0;
else
original_data[i] = 1;
}
// Each thread's slice must consist of whole compression batches.
if (num_elements % n_threads != 0) {
cout << "bad number of threads" << endl;
exit(0);
}
if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) {
cout << "bad num_elements or n_threads" << endl;
exit(0);
}
cout << "starting " << j << endl;
// Timed region: pinned-output allocation + threaded compression.
cudaEventRecord(start);
cudaMallocHost((void **)&compressed_data, num_elements * sizeof(float));
cudaMallocHost((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE *
sizeof(unsigned int));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &compressThread,
(void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milli;
cudaEventElapsedTime(&milli, start, stop);
compression_times.push_back(milli);
total_milli += milli;
// cout << milli << endl;
cudaFreeHost(original_data);
cudaFreeHost(compressed_data);
cudaFreeHost(mask);
}
// Report per-layer timings, then the total.
for (int i = 0; i < num_layers; i++) {
cout << compression_times[i] << endl;
}
cout << total_milli << endl;
}
bce4cf264fc8b8034619700f6ada0a442d3d8960.hip | // !!! This is a file automatically generated by hipify!!!
/* Les 2 lignes suivantes permettent d'utiliser nvcc avec gcc 4.7 (sinon erreur de compilation) */
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
// Includes
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
//#include <cmath>
#include <ctime>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_cuda.h"
using namespace std;
/**
 * Fill data[0..n) with pseudo-random floats drawn from [0, 1] via the C
 * library rand(); seed with srand() beforehand for reproducibility.
 */
void gendata(float *data, int n)
{
    int i = 0;
    while (i < n) {
        data[i] = rand() / ((float) RAND_MAX);
        ++i;
    }
}
/**
 * CPU reference: element-wise vector sum, c[i] = a[i] + b[i] for every
 * i in [0, n). No-op when n <= 0.
 */
void calc_cpu(float *a, float *b, float *c, int n)
{
    int i = n;
    while (i-- > 0)
        c[i] = a[i] + b[i];
}
/**
 * Maximum elementwise discrepancy between the GPU result and the CPU
 * reference over n entries: absolute difference when the reference
 * magnitude is below 1e-7, relative difference otherwise.
 * Returns 0 when n == 0.
 */
float calcerr(float *gpu, float *cpu, int n)
{
    float worst = 0.f;
    for (int i = 0; i < n; i++)
    {
        const float ref = cpu[i];
        const float diff = fabsf(ref - gpu[i]);
        // Absolute error near zero, relative error elsewhere.
        const float err = (fabsf(ref) < 1e-7) ? diff : diff / fabsf(ref);
        if (err > worst)
            worst = err;
    }
    return worst;
}
/**
 * Compute kernel: c[i] = a[i] + b[i] with indirect per-thread addressing.
 * Each thread's element index within its block comes from tab[threadIdx.x],
 * a host-prepared lookup table (identity except for a shuffled prefix, see
 * main), so the benchmark can measure the cost of permuted global-memory
 * accesses. The commented-out variant instead restricted work to a chosen
 * warp range.
 */
__global__ void vecadd_device(float *a, float *b, float *c, int n, int *tab)
//__global__ void vecadd_device(float *a, float *b, float *c, int n, int n_warp)
{
/*
if (n_warp == 0)
{
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid<n)
c[tid]=a[tid]+b[tid];
}
else
{
int offset = n_warp; const int tid = blockIdx.x*blockDim.x + threadIdx.x + offset;
//const int tid = blockIdx.x* blockDim.x + blockDim.x - 1 - threadIdx.x;
//const int tid = blockIdx.x*blockDim.x + threadIdx.x + n_warp*32;
const int tidSup = n_warp*32 + 32;
if (tid<n && tid<tidSup)
//if (tid<n)
c[tid]=a[tid]+b[tid];
}
*/
// Indirect index: tab[] permutes which element each thread touches.
const int tid = blockIdx.x*blockDim.x + tab[threadIdx.x];
if (tid<n)
c[tid]=a[tid]+b[tid];
}
//void cleanup(float* h_A, float* h_B, float* h_C, float* h_res,
//float* d_A, float* d_B, float* d_C)
/**
 * Release every buffer allocated by main(): host arrays with free(),
 * device arrays with hipFree(). Null pointers are skipped, so the function
 * is safe to call after a partial allocation failure.
 */
void cleanup(float* h_A, float* h_B, float* h_C, float* h_res, int* h_tab,
             float* d_A, float* d_B, float* d_C, int* d_tab)
{
    // Host-side buffers.
    if (h_A)   free(h_A);
    if (h_B)   free(h_B);
    if (h_C)   free(h_C);
    if (h_res) free(h_res);
    if (h_tab) free(h_tab);
    // Device-side buffers.
    if (d_A)   hipFree(d_A);
    if (d_B)   hipFree(d_B);
    if (d_C)   hipFree(d_C);
    if (d_tab) hipFree(d_tab);
}
// Host code
/**
 * Vector-addition benchmark with indirect (permuted) thread addressing.
 *
 * Usage: addition n   (n = number of float elements)
 *
 * Computes c = a + b on the GPU, validates it against a CPU reference,
 * then times the kernel over 10 launches and reports the effective
 * bandwidth in GiB/s.
 */
int main(int argc, char** argv)
{
    if (argc != 2) {
        cout << "Usage: addition n\n";
        exit(-1);
    }
    printf("Addition vectorielle\n");
    // Vector length from the command line.
    int n = atoi(argv[1]);
    size_t size = n*sizeof(float);
    // CPU-side vectors.
    float *h_A = nullptr, *h_B = nullptr, *h_C = nullptr;
    // GPU-side vectors.
    float *d_A = nullptr, *d_B = nullptr, *d_C = nullptr;
    // Allocate the host vectors.
    h_A = (float*) malloc(size);
    h_B = (float*) malloc(size);
    h_C = (float*) malloc(size);
    float *h_res = (float*) malloc(size);
    // Indirect-addressing table: identity for indices [256, 1024), random
    // permutation (Fisher-Yates) of the first 256 entries.
    size_t sizeInt = 1024*sizeof(int);
    int *h_tab = nullptr;
    h_tab = (int*) malloc(sizeInt);
    int *d_tab = nullptr;
    for (int i=0; i<1024; i++)
        h_tab[i]=i;
    for (int i=0; i<256; i++)
    {
        int r=rand()%(i+1);
        h_tab[i]=h_tab[r];
        h_tab[r]=i;
    }
    // Allocate the device vectors.
    checkCudaErrors(hipMalloc((void**) &d_A, size));
    checkCudaErrors(hipMalloc((void**) &d_B, size));
    checkCudaErrors(hipMalloc((void**) &d_C, size));
    checkCudaErrors(hipMalloc((void**) &d_tab, sizeInt));
    // Fill A and B with random data and compute the CPU reference.
    srand(time(NULL));
    gendata(h_A, n);
    gendata(h_B, n);
    calc_cpu(h_A, h_B, h_C, n);
    // Copy the inputs to device memory.
    checkCudaErrors(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_tab, h_tab, sizeInt, hipMemcpyHostToDevice));
    // Launch configuration.
    dim3 threadsPerBlock(1024);
    // BUG FIX: the block count was computed as ceil(n/(float)1024); float
    // rounding can under-count blocks for large n and leave tail elements
    // unprocessed. Use exact integer ceiling division instead.
    dim3 numBlocks((n + threadsPerBlock.x - 1) / threadsPerBlock.x);
    hipLaunchKernelGGL(( vecadd_device), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, n, d_tab);
    checkCudaErrors(hipPeekAtLastError());
    // Wait for the kernel so execution errors surface here.
    checkCudaErrors(hipDeviceSynchronize());
    // Copy the result back to the host.
    checkCudaErrors(hipMemcpy(h_res, d_C, size, hipMemcpyDeviceToHost));
    // Compare the GPU result against the CPU reference.
    std::cout << "Erreur relative : " << calcerr(h_res, h_C, n) << std::endl;
    // Time nb back-to-back kernel launches with device events.
    hipEvent_t start, stop;
    checkCudaErrors(hipEventCreate(&start));
    checkCudaErrors(hipEventCreate(&stop));
    const int nb = 10;
    checkCudaErrors(hipEventRecord(start, 0));
    for (int i = 0; i < nb; i++)
        hipLaunchKernelGGL(( vecadd_device), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, n, d_tab);
    checkCudaErrors(hipEventRecord(stop, 0));
    checkCudaErrors(hipEventSynchronize(stop));
    // Average kernel time in milliseconds.
    float t_ms;
    checkCudaErrors(hipEventElapsedTime(&t_ms, start, stop));
    t_ms /= nb;
    printf("Temps d'excution du Kernel : %e (ms)\n", t_ms);
    // Effective bandwidth: 3 arrays touched (2 reads + 1 write) of `size`
    // bytes; 1.024*1024*1024 == 2^30/1000 converts bytes/ms to GiB/s.
    printf("Bande passante : %e (GO/s)\n", 3*size/1.024/1024/1024/t_ms);
    // BUG FIX: destroy the timing events (they were previously leaked).
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));
    cleanup(h_A, h_B, h_C, h_res, h_tab, d_A, d_B, d_C, d_tab);
    return 0;
}
| bce4cf264fc8b8034619700f6ada0a442d3d8960.cu | /* Les 2 lignes suivantes permettent d'utiliser nvcc avec gcc 4.7 (sinon erreur de compilation) */
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
// Includes
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
//#include <cmath>
#include <ctime>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_cuda.h"
using namespace std;
/**
 * Fill data[0..n) with pseudo-random floats drawn from [0, 1] via the C
 * library rand(); seed with srand() beforehand for reproducibility.
 */
void gendata(float *data, int n)
{
    int i = 0;
    while (i < n) {
        data[i] = rand() / ((float) RAND_MAX);
        ++i;
    }
}
/**
 * CPU reference: element-wise vector sum, c[i] = a[i] + b[i] for every
 * i in [0, n). No-op when n <= 0.
 */
void calc_cpu(float *a, float *b, float *c, int n)
{
    int i = n;
    while (i-- > 0)
        c[i] = a[i] + b[i];
}
/**
 * Maximum elementwise discrepancy between the GPU result and the CPU
 * reference over n entries: absolute difference when the reference
 * magnitude is below 1e-7, relative difference otherwise.
 * Returns 0 when n == 0.
 */
float calcerr(float *gpu, float *cpu, int n)
{
    float worst = 0.f;
    for (int i = 0; i < n; i++)
    {
        const float ref = cpu[i];
        const float diff = fabsf(ref - gpu[i]);
        // Absolute error near zero, relative error elsewhere.
        const float err = (fabsf(ref) < 1e-7) ? diff : diff / fabsf(ref);
        if (err > worst)
            worst = err;
    }
    return worst;
}
/**
 * Compute kernel: c[i] = a[i] + b[i] with indirect per-thread addressing.
 * Each thread's element index within its block comes from tab[threadIdx.x],
 * a host-prepared lookup table (identity except for a shuffled prefix, see
 * main), so the benchmark can measure the cost of permuted global-memory
 * accesses. The commented-out variant instead restricted work to a chosen
 * warp range.
 */
__global__ void vecadd_device(float *a, float *b, float *c, int n, int *tab)
//__global__ void vecadd_device(float *a, float *b, float *c, int n, int n_warp)
{
/*
if (n_warp == 0)
{
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid<n)
c[tid]=a[tid]+b[tid];
}
else
{
int offset = n_warp; const int tid = blockIdx.x*blockDim.x + threadIdx.x + offset;
//const int tid = blockIdx.x* blockDim.x + blockDim.x - 1 - threadIdx.x;
//const int tid = blockIdx.x*blockDim.x + threadIdx.x + n_warp*32;
const int tidSup = n_warp*32 + 32;
if (tid<n && tid<tidSup)
//if (tid<n)
c[tid]=a[tid]+b[tid];
}
*/
// Indirect index: tab[] permutes which element each thread touches.
const int tid = blockIdx.x*blockDim.x + tab[threadIdx.x];
if (tid<n)
c[tid]=a[tid]+b[tid];
}
//void cleanup(float* h_A, float* h_B, float* h_C, float* h_res,
//float* d_A, float* d_B, float* d_C)
/**
 * Release every buffer allocated by main(): host arrays with free(),
 * device arrays with cudaFree(). Null pointers are skipped, so the
 * function is safe to call after a partial allocation failure.
 */
void cleanup(float* h_A, float* h_B, float* h_C, float* h_res, int* h_tab,
             float* d_A, float* d_B, float* d_C, int* d_tab)
{
    // Host-side buffers.
    if (h_A)   free(h_A);
    if (h_B)   free(h_B);
    if (h_C)   free(h_C);
    if (h_res) free(h_res);
    if (h_tab) free(h_tab);
    // Device-side buffers.
    if (d_A)   cudaFree(d_A);
    if (d_B)   cudaFree(d_B);
    if (d_C)   cudaFree(d_C);
    if (d_tab) cudaFree(d_tab);
}
// Host code
/**
 * Vector-addition benchmark with indirect (permuted) thread addressing.
 *
 * Usage: addition n   (n = number of float elements)
 *
 * Computes c = a + b on the GPU, validates it against a CPU reference,
 * then times the kernel over 10 launches and reports the effective
 * bandwidth in GiB/s.
 */
int main(int argc, char** argv)
{
    if (argc != 2) {
        cout << "Usage: addition n\n";
        exit(-1);
    }
    printf("Addition vectorielle\n");
    // Vector length from the command line.
    int n = atoi(argv[1]);
    size_t size = n*sizeof(float);
    // CPU-side vectors.
    float *h_A = nullptr, *h_B = nullptr, *h_C = nullptr;
    // GPU-side vectors.
    float *d_A = nullptr, *d_B = nullptr, *d_C = nullptr;
    // Allocate the host vectors.
    h_A = (float*) malloc(size);
    h_B = (float*) malloc(size);
    h_C = (float*) malloc(size);
    float *h_res = (float*) malloc(size);
    // Indirect-addressing table: identity for indices [256, 1024), random
    // permutation (Fisher-Yates) of the first 256 entries.
    size_t sizeInt = 1024*sizeof(int);
    int *h_tab = nullptr;
    h_tab = (int*) malloc(sizeInt);
    int *d_tab = nullptr;
    for (int i=0; i<1024; i++)
        h_tab[i]=i;
    for (int i=0; i<256; i++)
    {
        int r=rand()%(i+1);
        h_tab[i]=h_tab[r];
        h_tab[r]=i;
    }
    // Allocate the device vectors.
    checkCudaErrors(cudaMalloc((void**) &d_A, size));
    checkCudaErrors(cudaMalloc((void**) &d_B, size));
    checkCudaErrors(cudaMalloc((void**) &d_C, size));
    checkCudaErrors(cudaMalloc((void**) &d_tab, sizeInt));
    // Fill A and B with random data and compute the CPU reference.
    srand(time(NULL));
    gendata(h_A, n);
    gendata(h_B, n);
    calc_cpu(h_A, h_B, h_C, n);
    // Copy the inputs to device memory.
    checkCudaErrors(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_tab, h_tab, sizeInt, cudaMemcpyHostToDevice));
    // Launch configuration.
    dim3 threadsPerBlock(1024);
    // BUG FIX: the block count was computed as ceil(n/(float)1024); float
    // rounding can under-count blocks for large n and leave tail elements
    // unprocessed. Use exact integer ceiling division instead.
    dim3 numBlocks((n + threadsPerBlock.x - 1) / threadsPerBlock.x);
    vecadd_device<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, n, d_tab);
    checkCudaErrors(cudaPeekAtLastError());
    // Wait for the kernel so execution errors surface here.
    checkCudaErrors(cudaDeviceSynchronize());
    // Copy the result back to the host.
    checkCudaErrors(cudaMemcpy(h_res, d_C, size, cudaMemcpyDeviceToHost));
    // Compare the GPU result against the CPU reference.
    std::cout << "Erreur relative : " << calcerr(h_res, h_C, n) << std::endl;
    // Time nb back-to-back kernel launches with device events.
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    const int nb = 10;
    checkCudaErrors(cudaEventRecord(start, 0));
    for (int i = 0; i < nb; i++)
        vecadd_device<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, n, d_tab);
    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize(stop));
    // Average kernel time in milliseconds.
    float t_ms;
    checkCudaErrors(cudaEventElapsedTime(&t_ms, start, stop));
    t_ms /= nb;
    printf("Temps d'exécution du Kernel : %e (ms)\n", t_ms);
    // Effective bandwidth: 3 arrays touched (2 reads + 1 write) of `size`
    // bytes; 1.024*1024*1024 == 2^30/1000 converts bytes/ms to GiB/s.
    printf("Bande passante : %e (GO/s)\n", 3*size/1.024/1024/1024/t_ms);
    // BUG FIX: destroy the timing events (they were previously leaked).
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    cleanup(h_A, h_B, h_C, h_res, h_tab, d_A, d_B, d_C, d_tab);
    return 0;
}
|
330cd2d8fdcb67417efde1b8dfafbe288a423435.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Walsh transforms belong to a class of generalized Fourier transformations.
* They have applications in various fields of electrical engineering
* and numeric theory. In this sample we demonstrate efficient implementation
* of naturally-ordered Walsh transform
* (also known as Walsh-Hadamard or Hadamard transform) in CUDA and its
* particular application to dyadic convolution computation.
* Refer to excellent Jorg Arndt's "Algorithms for Programmers" textbook
* http://www.jjj.de/fxt/fxtbook.pdf (Chapter 22)
*
* Victor Podlozhnyuk (vpodlozhnyuk@nvidia.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <helper_functions.h>
//#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////////////
// Reference CPU FWT
////////////////////////////////////////////////////////////////////////////////
extern"C" void fwtCPU(float *h_Output, float *h_Input, int log2N);
extern"C" void slowWTcpu(float *h_Output, float *h_Input, int log2N);
extern "C" void dyadicConvolutionCPU(
float *h_Result,
float *h_Data,
float *h_Kernel,
int log2dataN,
int log2kernelN
);
////////////////////////////////////////////////////////////////////////////////
// GPU FWT
////////////////////////////////////////////////////////////////////////////////
//#include "fastWalshTransform_kernel.cuh"
#define ELEMENTARY_LOG2SIZE 11
__extern__shared__ float s_data[];
__global__ void fwtBatch1Kernel(float *d_Output, float *d_Input, int log2N)
{
const int N = 1 << log2N;
const int base = blockIdx.x << log2N;
//(2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80
float *d_Src = d_Input + base;
float *d_Dst = d_Output + base;
for (int pos = threadIdx.x; pos < N; pos += blockDim.x)
{
s_data[pos] = d_Src[pos];
}
//Main radix-4 stages
const int pos = threadIdx.x;
for (int stride = N >> 2; stride > 0; stride >>= 2)
{
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
__syncthreads();
float D0 = s_data[i0];
float D1 = s_data[i1];
float D2 = s_data[i2];
float D3 = s_data[i3];
float T;
T = D0;
D0 = D0 + D2;
D2 = T - D2;
T = D1;
D1 = D1 + D3;
D3 = T - D3;
T = D0;
s_data[i0] = D0 + D1;
s_data[i1] = T - D1;
T = D2;
s_data[i2] = D2 + D3;
s_data[i3] = T - D3;
}
//Do single radix-2 stage for odd power of two
if (log2N & 1)
{
__syncthreads();
for (int pos = threadIdx.x; pos < N / 2; pos += blockDim.x)
{
int i0 = pos << 1;
int i1 = i0 + 1;
float D0 = s_data[i0];
float D1 = s_data[i1];
s_data[i0] = D0 + D1;
s_data[i1] = D0 - D1;
}
}
__syncthreads();
for (int pos = threadIdx.x; pos < N; pos += blockDim.x)
{
d_Dst[pos] = s_data[pos];
}
}
////////////////////////////////////////////////////////////////////////////////
// Single in-global memory radix-4 Fast Walsh Transform pass
// (for strides exceeding elementary vector size)
////////////////////////////////////////////////////////////////////////////////
__global__ void fwtBatch2Kernel(
float *d_Output,
float *d_Input,
int stride
)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int N = blockDim.x * gridDim.x * 4;
float *d_Src = d_Input + blockIdx.y * N;
float *d_Dst = d_Output + blockIdx.y * N;
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
float D0 = d_Src[i0];
float D1 = d_Src[i1];
float D2 = d_Src[i2];
float D3 = d_Src[i3];
float T;
T = D0;
D0 = D0 + D2;
D2 = T - D2;
T = D1;
D1 = D1 + D3;
D3 = T - D3;
T = D0;
d_Dst[i0] = D0 + D1;
d_Dst[i1] = T - D1;
T = D2;
d_Dst[i2] = D2 + D3;
d_Dst[i3] = T - D3;
}
////////////////////////////////////////////////////////////////////////////////
// Put everything together: batched Fast Walsh Transform CPU front-end
////////////////////////////////////////////////////////////////////////////////
void fwtBatchGPU(float *d_Data, int M, int log2N)
{
const int THREAD_N = 256;
int N = 1 << log2N;
dim3 grid((1 << log2N) / (4 * THREAD_N), M, 1);
for (; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2)
{
hipLaunchKernelGGL(( fwtBatch2Kernel), dim3(grid), dim3(THREAD_N), 0, 0, d_Data, d_Data, N / 4);
//getLastCudaError("fwtBatch2Kernel() execution failed\n");
printf("fwtBatch2Kernel() execution failed\n");
}
hipLaunchKernelGGL(( fwtBatch1Kernel), dim3(M), dim3(N / 4), N *sizeof(float), 0,
d_Data,
d_Data,
log2N
);
//getLastCudaError("fwtBatch1Kernel() execution failed\n");
printf("fwtBatch1Kernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Modulate two arrays
////////////////////////////////////////////////////////////////////////////////
__global__ void modulateKernel(float *d_A, float *d_B, int N)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numThreads = blockDim.x * gridDim.x;
float rcpN = 1.0f / (float)N;
for (int pos = tid; pos < N; pos += numThreads)
{
d_A[pos] *= d_B[pos] * rcpN;
}
}
//Interface to modulateKernel()
void modulateGPU(float *d_A, float *d_B, int N)
{
hipLaunchKernelGGL(( modulateKernel), dim3(128), dim3(256), 0, 0, d_A, d_B, N);
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//const int log2Kernel = 7;
//const int log2Data = 23;
const int log2Kernel = 6;
const int log2Data = 12;
const int dataN = 1 << log2Data;
const int kernelN = 1 << log2Kernel;
const int DATA_SIZE = dataN * sizeof(float);
const int KERNEL_SIZE = kernelN * sizeof(float);
const long NOPS = 3.0 * (double)dataN * (double)log2Data / 2.0;
//const double NOPS = 3.0 * (double)dataN * (double)log2Data / 2.0;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
float *h_Data,
*h_Kernel,
*h_ResultCPU,
*h_ResultGPU;
float *d_Data,
*d_Kernel;
double delta, ref, sum_delta2, sum_ref2, L2norm, gpuTime;
//StopWatchInterface *hTimer = NULL;
int i;
printf("%s Starting...\n\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//findCudaDevice(argc, (const char **)argv);
//sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory\n");
h_Kernel = (float *)malloc(KERNEL_SIZE);
h_Data = (float *)malloc(DATA_SIZE);
h_ResultCPU = (float *)malloc(DATA_SIZE);
h_ResultGPU = (float *)malloc(DATA_SIZE);
printf("...allocating GPU memory\n");
//checkCudaErrors(hipMalloc((void **)&d_Kernel, DATA_SIZE));
//checkCudaErrors(hipMalloc((void **)&d_Data, DATA_SIZE));
hipMalloc((void **)&d_Kernel, DATA_SIZE);
hipMalloc((void **)&d_Data, DATA_SIZE);
printf("...generating data\n");
printf("Data length: %i; kernel length: %i\n", dataN, kernelN);
#ifdef _SYM
klee_make_symbolic(h_Kernel, KERNEL_SIZE, "h_Kernel_input");
klee_make_symbolic(h_Data, DATA_SIZE, "h_Data_input");
#else
srand(2007);
for (i = 0; i < kernelN; i++)
{
h_Kernel[i] = (float)rand() / (float)RAND_MAX;
}
for (i = 0; i < dataN; i++)
{
h_Data[i] = (float)rand() / (float)RAND_MAX;
}
#endif
//checkCudaErrors(hipMemset(d_Kernel, 0, DATA_SIZE));
//checkCudaErrors(hipMemcpy(d_Kernel, h_Kernel, KERNEL_SIZE, hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(d_Data, h_Data, DATA_SIZE, hipMemcpyHostToDevice));
hipMemset(d_Kernel, 0, DATA_SIZE);
hipMemcpy(d_Kernel, h_Kernel, KERNEL_SIZE, hipMemcpyHostToDevice);
hipMemcpy(d_Data, h_Data, DATA_SIZE, hipMemcpyHostToDevice);
printf("Running GPU dyadic convolution using Fast Walsh Transform...\n");
//checkCudaErrors(hipDeviceSynchronize());
//sdkResetTimer(&hTimer);
//sdkStartTimer(&hTimer);
fwtBatchGPU(d_Data, 1, log2Data);
fwtBatchGPU(d_Kernel, 1, log2Data);
modulateGPU(d_Data, d_Kernel, dataN);
fwtBatchGPU(d_Data, 1, log2Data);
//checkCudaErrors(hipDeviceSynchronize());
//sdkStopTimer(&hTimer);
//gpuTime = sdkGetTimerValue(&hTimer);
// removed for global FP choking Gklee
// printf("GPU time: %f ms; GOP/s: %f\n", gpuTime, NOPS / (gpuTime * 0.001 * 1E+9));
printf("Reading back GPU results...\n");
//checkCudaErrors(hipMemcpy(h_ResultGPU, d_Data, DATA_SIZE, hipMemcpyDeviceToHost));
hipMemcpy(h_ResultGPU, d_Data, DATA_SIZE, hipMemcpyDeviceToHost);
printf("Running straightforward CPU dyadic convolution...\n");
//dyadicConvolutionCPU(h_ResultCPU, h_Data, h_Kernel, log2Data, log2Kernel);
printf("Comparing the results...\n");
sum_delta2 = 0;
sum_ref2 = 0;
/*for (i = 0; i < dataN; i++)
{
delta = h_ResultCPU[i] - h_ResultGPU[i];
ref = h_ResultCPU[i];
sum_delta2 += delta * delta;
sum_ref2 += ref * ref;
}*/
L2norm = sqrt(sum_delta2 / sum_ref2);
printf("Shutting down...\n");
//sdkDeleteTimer(&hTimer);
//checkCudaErrors(hipFree(d_Data));
//checkCudaErrors(hipFree(d_Kernel));
hipFree(d_Data);
hipFree(d_Kernel);
free(h_ResultGPU);
free(h_ResultCPU);
free(h_Data);
free(h_Kernel);
//hipDeviceReset();
printf("L2 norm: %E\n", L2norm);
printf(L2norm < 1e-6 ? "Test passed\n" : "Test failed!\n");
}
| 330cd2d8fdcb67417efde1b8dfafbe288a423435.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Walsh transforms belong to a class of generalized Fourier transformations.
* They have applications in various fields of electrical engineering
* and numeric theory. In this sample we demonstrate efficient implementation
* of naturally-ordered Walsh transform
* (also known as Walsh-Hadamard or Hadamard transform) in CUDA and its
* particular application to dyadic convolution computation.
* Refer to excellent Jorg Arndt's "Algorithms for Programmers" textbook
* http://www.jjj.de/fxt/fxtbook.pdf (Chapter 22)
*
* Victor Podlozhnyuk (vpodlozhnyuk@nvidia.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <helper_functions.h>
//#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////////////
// Reference CPU FWT
////////////////////////////////////////////////////////////////////////////////
extern"C" void fwtCPU(float *h_Output, float *h_Input, int log2N);
extern"C" void slowWTcpu(float *h_Output, float *h_Input, int log2N);
extern "C" void dyadicConvolutionCPU(
float *h_Result,
float *h_Data,
float *h_Kernel,
int log2dataN,
int log2kernelN
);
////////////////////////////////////////////////////////////////////////////////
// GPU FWT
////////////////////////////////////////////////////////////////////////////////
//#include "fastWalshTransform_kernel.cuh"
#define ELEMENTARY_LOG2SIZE 11
__extern__shared__ float s_data[];
__global__ void fwtBatch1Kernel(float *d_Output, float *d_Input, int log2N)
{
const int N = 1 << log2N;
const int base = blockIdx.x << log2N;
//(2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80
float *d_Src = d_Input + base;
float *d_Dst = d_Output + base;
for (int pos = threadIdx.x; pos < N; pos += blockDim.x)
{
s_data[pos] = d_Src[pos];
}
//Main radix-4 stages
const int pos = threadIdx.x;
for (int stride = N >> 2; stride > 0; stride >>= 2)
{
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
__syncthreads();
float D0 = s_data[i0];
float D1 = s_data[i1];
float D2 = s_data[i2];
float D3 = s_data[i3];
float T;
T = D0;
D0 = D0 + D2;
D2 = T - D2;
T = D1;
D1 = D1 + D3;
D3 = T - D3;
T = D0;
s_data[i0] = D0 + D1;
s_data[i1] = T - D1;
T = D2;
s_data[i2] = D2 + D3;
s_data[i3] = T - D3;
}
//Do single radix-2 stage for odd power of two
if (log2N & 1)
{
__syncthreads();
for (int pos = threadIdx.x; pos < N / 2; pos += blockDim.x)
{
int i0 = pos << 1;
int i1 = i0 + 1;
float D0 = s_data[i0];
float D1 = s_data[i1];
s_data[i0] = D0 + D1;
s_data[i1] = D0 - D1;
}
}
__syncthreads();
for (int pos = threadIdx.x; pos < N; pos += blockDim.x)
{
d_Dst[pos] = s_data[pos];
}
}
////////////////////////////////////////////////////////////////////////////////
// Single in-global memory radix-4 Fast Walsh Transform pass
// (for strides exceeding elementary vector size)
////////////////////////////////////////////////////////////////////////////////
__global__ void fwtBatch2Kernel(
float *d_Output,
float *d_Input,
int stride
)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int N = blockDim.x * gridDim.x * 4;
float *d_Src = d_Input + blockIdx.y * N;
float *d_Dst = d_Output + blockIdx.y * N;
int lo = pos & (stride - 1);
int i0 = ((pos - lo) << 2) + lo;
int i1 = i0 + stride;
int i2 = i1 + stride;
int i3 = i2 + stride;
float D0 = d_Src[i0];
float D1 = d_Src[i1];
float D2 = d_Src[i2];
float D3 = d_Src[i3];
float T;
T = D0;
D0 = D0 + D2;
D2 = T - D2;
T = D1;
D1 = D1 + D3;
D3 = T - D3;
T = D0;
d_Dst[i0] = D0 + D1;
d_Dst[i1] = T - D1;
T = D2;
d_Dst[i2] = D2 + D3;
d_Dst[i3] = T - D3;
}
////////////////////////////////////////////////////////////////////////////////
// Put everything together: batched Fast Walsh Transform CPU front-end
////////////////////////////////////////////////////////////////////////////////
void fwtBatchGPU(float *d_Data, int M, int log2N)
{
const int THREAD_N = 256;
int N = 1 << log2N;
dim3 grid((1 << log2N) / (4 * THREAD_N), M, 1);
for (; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2)
{
fwtBatch2Kernel<<<grid, THREAD_N>>>(d_Data, d_Data, N / 4);
//getLastCudaError("fwtBatch2Kernel() execution failed\n");
printf("fwtBatch2Kernel() execution failed\n");
}
fwtBatch1Kernel<<<M, N / 4, N *sizeof(float)>>>(
d_Data,
d_Data,
log2N
);
//getLastCudaError("fwtBatch1Kernel() execution failed\n");
printf("fwtBatch1Kernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Modulate two arrays
////////////////////////////////////////////////////////////////////////////////
__global__ void modulateKernel(float *d_A, float *d_B, int N)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int numThreads = blockDim.x * gridDim.x;
float rcpN = 1.0f / (float)N;
for (int pos = tid; pos < N; pos += numThreads)
{
d_A[pos] *= d_B[pos] * rcpN;
}
}
//Interface to modulateKernel()
void modulateGPU(float *d_A, float *d_B, int N)
{
modulateKernel<<<128, 256>>>(d_A, d_B, N);
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//const int log2Kernel = 7;
//const int log2Data = 23;
const int log2Kernel = 6;
const int log2Data = 12;
const int dataN = 1 << log2Data;
const int kernelN = 1 << log2Kernel;
const int DATA_SIZE = dataN * sizeof(float);
const int KERNEL_SIZE = kernelN * sizeof(float);
const long NOPS = 3.0 * (double)dataN * (double)log2Data / 2.0;
//const double NOPS = 3.0 * (double)dataN * (double)log2Data / 2.0;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
float *h_Data,
*h_Kernel,
*h_ResultCPU,
*h_ResultGPU;
float *d_Data,
*d_Kernel;
double delta, ref, sum_delta2, sum_ref2, L2norm, gpuTime;
//StopWatchInterface *hTimer = NULL;
int i;
printf("%s Starting...\n\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
//findCudaDevice(argc, (const char **)argv);
//sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory\n");
h_Kernel = (float *)malloc(KERNEL_SIZE);
h_Data = (float *)malloc(DATA_SIZE);
h_ResultCPU = (float *)malloc(DATA_SIZE);
h_ResultGPU = (float *)malloc(DATA_SIZE);
printf("...allocating GPU memory\n");
//checkCudaErrors(cudaMalloc((void **)&d_Kernel, DATA_SIZE));
//checkCudaErrors(cudaMalloc((void **)&d_Data, DATA_SIZE));
cudaMalloc((void **)&d_Kernel, DATA_SIZE);
cudaMalloc((void **)&d_Data, DATA_SIZE);
printf("...generating data\n");
printf("Data length: %i; kernel length: %i\n", dataN, kernelN);
#ifdef _SYM
klee_make_symbolic(h_Kernel, KERNEL_SIZE, "h_Kernel_input");
klee_make_symbolic(h_Data, DATA_SIZE, "h_Data_input");
#else
srand(2007);
for (i = 0; i < kernelN; i++)
{
h_Kernel[i] = (float)rand() / (float)RAND_MAX;
}
for (i = 0; i < dataN; i++)
{
h_Data[i] = (float)rand() / (float)RAND_MAX;
}
#endif
//checkCudaErrors(cudaMemset(d_Kernel, 0, DATA_SIZE));
//checkCudaErrors(cudaMemcpy(d_Kernel, h_Kernel, KERNEL_SIZE, cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(d_Data, h_Data, DATA_SIZE, cudaMemcpyHostToDevice));
cudaMemset(d_Kernel, 0, DATA_SIZE);
cudaMemcpy(d_Kernel, h_Kernel, KERNEL_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(d_Data, h_Data, DATA_SIZE, cudaMemcpyHostToDevice);
printf("Running GPU dyadic convolution using Fast Walsh Transform...\n");
//checkCudaErrors(cudaDeviceSynchronize());
//sdkResetTimer(&hTimer);
//sdkStartTimer(&hTimer);
fwtBatchGPU(d_Data, 1, log2Data);
fwtBatchGPU(d_Kernel, 1, log2Data);
modulateGPU(d_Data, d_Kernel, dataN);
fwtBatchGPU(d_Data, 1, log2Data);
//checkCudaErrors(cudaDeviceSynchronize());
//sdkStopTimer(&hTimer);
//gpuTime = sdkGetTimerValue(&hTimer);
// removed for global FP choking Gklee
// printf("GPU time: %f ms; GOP/s: %f\n", gpuTime, NOPS / (gpuTime * 0.001 * 1E+9));
printf("Reading back GPU results...\n");
//checkCudaErrors(cudaMemcpy(h_ResultGPU, d_Data, DATA_SIZE, cudaMemcpyDeviceToHost));
cudaMemcpy(h_ResultGPU, d_Data, DATA_SIZE, cudaMemcpyDeviceToHost);
printf("Running straightforward CPU dyadic convolution...\n");
//dyadicConvolutionCPU(h_ResultCPU, h_Data, h_Kernel, log2Data, log2Kernel);
printf("Comparing the results...\n");
sum_delta2 = 0;
sum_ref2 = 0;
/*for (i = 0; i < dataN; i++)
{
delta = h_ResultCPU[i] - h_ResultGPU[i];
ref = h_ResultCPU[i];
sum_delta2 += delta * delta;
sum_ref2 += ref * ref;
}*/
L2norm = sqrt(sum_delta2 / sum_ref2);
printf("Shutting down...\n");
//sdkDeleteTimer(&hTimer);
//checkCudaErrors(cudaFree(d_Data));
//checkCudaErrors(cudaFree(d_Kernel));
cudaFree(d_Data);
cudaFree(d_Kernel);
free(h_ResultGPU);
free(h_ResultCPU);
free(h_Data);
free(h_Kernel);
//cudaDeviceReset();
printf("L2 norm: %E\n", L2norm);
printf(L2norm < 1e-6 ? "Test passed\n" : "Test failed!\n");
}
|
6d6d60974e031fe96bd48616f508598a5c92554d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_test0_write(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr) {
return;
}
for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){
ptr[i] = pattern;
}
return;
} | 6d6d60974e031fe96bd48616f508598a5c92554d.cu | #include "includes.h"
__global__ void kernel_test0_write(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr) {
return;
}
for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){
ptr[i] = pattern;
}
return;
} |
422655919808c46d247c9b99e1db5fc1b9bddf69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void compute_array_log(float* array, float* groundTruth, int size)
{
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for(int i = 0; i < size; i += num_threads)
{
int index = i + thread_index;
if(index < size)
{
groundTruth[index] = log(array[index]) * groundTruth[index];
}
}
} | 422655919808c46d247c9b99e1db5fc1b9bddf69.cu | #include "includes.h"
__global__ void compute_array_log(float* array, float* groundTruth, int size)
{
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for(int i = 0; i < size; i += num_threads)
{
int index = i + thread_index;
if(index < size)
{
groundTruth[index] = log(array[index]) * groundTruth[index];
}
}
} |
bda33849ddf2b43da27c19fbb701e2a92d0466c5.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip/HIPConfig.h>
#include <ATen/hip/cub.cuh>
namespace at {
namespace cuda {
namespace cub {
template <typename key_t>
void radix_sort_keys(
const key_t* keys_in,
key_t* keys_out,
int64_t n,
bool descending,
int64_t begin_bit,
int64_t end_bit) {
TORCH_CHECK(
n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
const key_t_* keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_* keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::hipcub::DeviceRadixSort::SortKeysDescending,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::hip::getCurrentHIPStreamMasqueradingAsCUDA());
} else {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::hipcub::DeviceRadixSort::SortKeys,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::hip::getCurrentHIPStreamMasqueradingAsCUDA());
}
}
template <typename scalar_t>
void unique(
const scalar_t* input,
scalar_t* output,
int64_t* num_selected_out,
int64_t num_items) {
TORCH_CHECK(
num_items <= std::numeric_limits<int>::max(),
"cub unique does not support more than INT_MAX elements");
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::hipcub::DeviceSelect::Unique,
input,
output,
num_selected_out,
num_items,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
}
template <typename scalar_t>
void run_length_encode(
const scalar_t* input,
scalar_t* output,
int64_t* counts_out,
int64_t* length_out,
int64_t num_items) {
TORCH_CHECK(
num_items <= std::numeric_limits<int>::max(),
"cub run_length_encode does not support more than INT_MAX elements");
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::hipcub::DeviceRunLengthEncode::Encode,
input,
output,
counts_out,
length_out,
num_items,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
}
#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \
template void radix_sort_keys( \
const scalar_t* keys_in, \
scalar_t* keys_out, \
int64_t n, \
bool descending, \
int64_t begin_bit, \
int64_t end_bit); \
template void unique( \
const scalar_t* input, \
scalar_t* output, \
int64_t* num_selected_out, \
int64_t num_items); \
template void run_length_encode( \
const scalar_t* input, \
scalar_t* output, \
int64_t* counts_out, \
int64_t* length_out, \
int64_t n);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
} // namespace cub
} // namespace cuda
} // namespace at
| bda33849ddf2b43da27c19fbb701e2a92d0466c5.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAConfig.h>
#include <ATen/cuda/cub.cuh>
namespace at {
namespace cuda {
namespace cub {
template <typename key_t>
void radix_sort_keys(
const key_t* keys_in,
key_t* keys_out,
int64_t n,
bool descending,
int64_t begin_bit,
int64_t end_bit) {
TORCH_CHECK(
n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
const key_t_* keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_* keys_out_ = reinterpret_cast<key_t_*>(keys_out);
if (descending) {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::cuda::getCurrentCUDAStream());
} else {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::cuda::getCurrentCUDAStream());
}
}
template <typename scalar_t>
void unique(
const scalar_t* input,
scalar_t* output,
int64_t* num_selected_out,
int64_t num_items) {
TORCH_CHECK(
num_items <= std::numeric_limits<int>::max(),
"cub unique does not support more than INT_MAX elements");
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique,
input,
output,
num_selected_out,
num_items,
at::cuda::getCurrentCUDAStream());
}
template <typename scalar_t>
void run_length_encode(
const scalar_t* input,
scalar_t* output,
int64_t* counts_out,
int64_t* length_out,
int64_t num_items) {
TORCH_CHECK(
num_items <= std::numeric_limits<int>::max(),
"cub run_length_encode does not support more than INT_MAX elements");
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode,
input,
output,
counts_out,
length_out,
num_items,
at::cuda::getCurrentCUDAStream());
}
#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \
template void radix_sort_keys( \
const scalar_t* keys_in, \
scalar_t* keys_out, \
int64_t n, \
bool descending, \
int64_t begin_bit, \
int64_t end_bit); \
template void unique( \
const scalar_t* input, \
scalar_t* output, \
int64_t* num_selected_out, \
int64_t num_items); \
template void run_length_encode( \
const scalar_t* input, \
scalar_t* output, \
int64_t* counts_out, \
int64_t* length_out, \
int64_t n);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
} // namespace cub
} // namespace cuda
} // namespace at
|
1c20fc37ab8f1c5e258dbb9ef582105e38f26674.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/prroi_pool_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
DEVICE void PrRoIPoolingDistributeDiffCUDA(T* diff, const T top_diff,
const int h, const int w,
const int height, const int width,
const T coeff) {
bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
if (!overflow) {
paddle::platform::CudaAtomicAdd(diff + h * width + w, top_diff * coeff);
}
}
template <typename T>
DEVICE void GPUAccumulateRois(T* offset, T data) {
paddle::platform::CudaAtomicAdd(offset, data);
}
template <typename T>
__global__ void GPUPRROIPoolForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int input_channels, const int height,
const int width, const int output_channels, const int pooled_height,
const int pooled_width, const int* rois_batch_id_data, T* output_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int input_channel = c;
const T* offset_input_data =
input_data +
(roi_batch_id * input_channels + input_channel) * height * width;
if (win_size > static_cast<T>(0.0)) {
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = 0;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
sum_out += PrRoIPoolingMatCalculation(
offset_input_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width);
}
}
output_data[i] = sum_out / win_size;
} else {
output_data[i] = 0.;
}
}
}
template <typename T>
__global__ void GPUPRROIPoolBackward(
const int nthreads, const T* in_data, const T* input_rois,
const T* output_grad_data, const float spatial_scale,
const int input_channels, const int height, const int width,
const int output_channels, const int pooled_height, const int pooled_width,
const int* rois_batch_id_data, T* input_grad_data, const T* out_data,
T* input_roi_grad_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
int input_channel = c;
int input_offset =
(roi_batch_id * input_channels + input_channel) * height * width;
T* offset_input_grad_data = input_grad_data + input_offset;
const T* offset_output_grad_data = output_grad_data + i;
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T* offset_input_roi_grad_data = input_roi_grad_data + n * 4;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = win_size == static_cast<T>(0.)
? static_cast<T>(0.)
: *offset_output_grad_data / win_size;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
PrRoIPoolingMatDistributeDiff(
offset_input_grad_data, sum_out, h_iter, w_iter, h_iter + 1,
w_iter + 1, max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width, PrRoIPoolingDistributeDiffCUDA<T>);
}
}
const T* offset_out_data = out_data + i;
const T* offset_in_data = in_data + input_offset;
PrRoIPoolingCoorBackward(
s_w, e_w, s_h, e_h, width, height, win_start_w, win_start_h, win_end_w,
win_end_h, pw, ph, pooled_width, pooled_height, win_size, spatial_scale,
offset_in_data, offset_out_data, offset_input_roi_grad_data,
offset_output_grad_data, GPUAccumulateRois<T>,
[](const T x, const T y) { return max(x, y); },
[](const T x, const T y) { return min(x, y); });
}
}
template <typename T>
class GPUPRROIPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int input_channels = in_dims[1];
auto output_channels = input_channels;
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
// set rois batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and input(X) batch_size must be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be the same."));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
// call cuda kernel function
hipLaunchKernelGGL(( GPUPRROIPoolForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size, in->data<T>(), rois->data<T>(), spatial_scale,
input_channels, height, width, output_channels, pooled_height,
pooled_width, roi_id_data, out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename DeviceContext, typename T>
class GPUPRROIPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Input<framework::Tensor>("Out");
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* input_roi_grad =
ctx.Output<LoDTensor>(framework::GradVarName("ROIs"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int input_channels = in->dims()[1];
auto output_channels = input_channels;
int height = in->dims()[2];
int width = in->dims()[3];
if (input_grad || input_roi_grad) {
// set roi batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
PADDLE_ENFORCE_EQ(rois->lod().empty(), false,
platform::errors::InvalidArgument(
"the lod of Input ROIs should not be empty when "
"BatchRoINums is None!"));
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
input_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<DeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0));
input_roi_grad->mutable_data<T>(ctx.GetPlace());
set_zero(ctx.cuda_device_context(), input_roi_grad, static_cast<T>(0));
int output_grad_size = output_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
hipLaunchKernelGGL(( GPUPRROIPoolBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_grad_size, in->data<T>(), rois->data<T>(),
output_grad->data<T>(), spatial_scale, input_channels, height,
width, output_channels, pooled_height, pooled_width, roi_id_data,
input_grad->mutable_data<T>(ctx.GetPlace()), out->data<T>(),
input_roi_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(prroi_pool, ops::GPUPRROIPoolOpKernel<float>,
ops::GPUPRROIPoolOpKernel<double>);
REGISTER_OP_CUDA_KERNEL(
prroi_pool_grad,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
| 1c20fc37ab8f1c5e258dbb9ef582105e38f26674.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/prroi_pool_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
DEVICE void PrRoIPoolingDistributeDiffCUDA(T* diff, const T top_diff,
const int h, const int w,
const int height, const int width,
const T coeff) {
bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
if (!overflow) {
paddle::platform::CudaAtomicAdd(diff + h * width + w, top_diff * coeff);
}
}
template <typename T>
DEVICE void GPUAccumulateRois(T* offset, T data) {
paddle::platform::CudaAtomicAdd(offset, data);
}
template <typename T>
__global__ void GPUPRROIPoolForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int input_channels, const int height,
const int width, const int output_channels, const int pooled_height,
const int pooled_width, const int* rois_batch_id_data, T* output_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int input_channel = c;
const T* offset_input_data =
input_data +
(roi_batch_id * input_channels + input_channel) * height * width;
if (win_size > static_cast<T>(0.0)) {
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = 0;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
sum_out += PrRoIPoolingMatCalculation(
offset_input_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width);
}
}
output_data[i] = sum_out / win_size;
} else {
output_data[i] = 0.;
}
}
}
template <typename T>
__global__ void GPUPRROIPoolBackward(
const int nthreads, const T* in_data, const T* input_rois,
const T* output_grad_data, const float spatial_scale,
const int input_channels, const int height, const int width,
const int output_channels, const int pooled_height, const int pooled_width,
const int* rois_batch_id_data, T* input_grad_data, const T* out_data,
T* input_roi_grad_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
int input_channel = c;
int input_offset =
(roi_batch_id * input_channels + input_channel) * height * width;
T* offset_input_grad_data = input_grad_data + input_offset;
const T* offset_output_grad_data = output_grad_data + i;
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T* offset_input_roi_grad_data = input_roi_grad_data + n * 4;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = win_size == static_cast<T>(0.)
? static_cast<T>(0.)
: *offset_output_grad_data / win_size;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
PrRoIPoolingMatDistributeDiff(
offset_input_grad_data, sum_out, h_iter, w_iter, h_iter + 1,
w_iter + 1, max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width, PrRoIPoolingDistributeDiffCUDA<T>);
}
}
const T* offset_out_data = out_data + i;
const T* offset_in_data = in_data + input_offset;
PrRoIPoolingCoorBackward(
s_w, e_w, s_h, e_h, width, height, win_start_w, win_start_h, win_end_w,
win_end_h, pw, ph, pooled_width, pooled_height, win_size, spatial_scale,
offset_in_data, offset_out_data, offset_input_roi_grad_data,
offset_output_grad_data, GPUAccumulateRois<T>,
[](const T x, const T y) { return max(x, y); },
[](const T x, const T y) { return min(x, y); });
}
}
template <typename T>
class GPUPRROIPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int input_channels = in_dims[1];
auto output_channels = input_channels;
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
// set rois batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and input(X) batch_size must be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be the same."));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
// call cuda kernel function
GPUPRROIPoolForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size, in->data<T>(), rois->data<T>(), spatial_scale,
input_channels, height, width, output_channels, pooled_height,
pooled_width, roi_id_data, out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename DeviceContext, typename T>
class GPUPRROIPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Input<framework::Tensor>("Out");
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* input_roi_grad =
ctx.Output<LoDTensor>(framework::GradVarName("ROIs"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int input_channels = in->dims()[1];
auto output_channels = input_channels;
int height = in->dims()[2];
int width = in->dims()[3];
if (input_grad || input_roi_grad) {
// set roi batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
PADDLE_ENFORCE_EQ(rois->lod().empty(), false,
platform::errors::InvalidArgument(
"the lod of Input ROIs should not be empty when "
"BatchRoINums is None!"));
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
input_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<DeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0));
input_roi_grad->mutable_data<T>(ctx.GetPlace());
set_zero(ctx.cuda_device_context(), input_roi_grad, static_cast<T>(0));
int output_grad_size = output_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
GPUPRROIPoolBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_grad_size, in->data<T>(), rois->data<T>(),
output_grad->data<T>(), spatial_scale, input_channels, height,
width, output_channels, pooled_height, pooled_width, roi_id_data,
input_grad->mutable_data<T>(ctx.GetPlace()), out->data<T>(),
input_roi_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(prroi_pool, ops::GPUPRROIPoolOpKernel<float>,
ops::GPUPRROIPoolOpKernel<double>);
REGISTER_OP_CUDA_KERNEL(
prroi_pool_grad,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
494801376d44a650bee445b295893bf3c7337dda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "hoomd/HOOMDMath.h"
#include "MuellerPlatheFlow.h"
#include "MuellerPlatheFlowGPU.h"
#include "MuellerPlatheFlowGPU.cuh"
#include <assert.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
struct vel_search_un_opt : public thrust::unary_function< const unsigned int,Scalar3>
{
vel_search_un_opt(const Scalar4*const d_vel,const unsigned int *const d_tag,flow_enum::Direction flow_direction)
:
m_vel(d_vel),
m_tag(d_tag),
m_flow_direction(flow_direction)
{}
const Scalar4*const m_vel;
const unsigned int*const m_tag;
const flow_enum::Direction m_flow_direction;
__host__ __device__ Scalar3 operator()(const unsigned int idx)const
{
const unsigned int tag = m_tag[idx];
Scalar vel;
switch( m_flow_direction )
{
case flow_enum::X: vel = m_vel[idx].x; break;
case flow_enum::Y: vel = m_vel[idx].y; break;
case flow_enum::Z: vel = m_vel[idx].z; break;
}
const Scalar mass = m_vel[idx].w;
vel *= mass;
Scalar3 result;
result.x = vel;
result.y = mass;
result.z = __int_as_scalar(tag);
return result;
}
};
template <typename CMP>
struct vel_search_binary_opt : public thrust::binary_function< Scalar3, Scalar3, Scalar3 >
{
vel_search_binary_opt(const unsigned int*const d_rtag,
const Scalar4*const d_pos,
const BoxDim gl_box,
const unsigned int Nslabs,
const unsigned int slab_index,
const Scalar3 invalid,
const flow_enum::Direction slab_direction)
: m_rtag(d_rtag),
m_pos(d_pos),
m_gl_box(gl_box),
m_Nslabs(Nslabs),
m_slab_index(slab_index),
m_invalid(invalid),
m_slab_direction(slab_direction)
{}
const unsigned int*const m_rtag;
const Scalar4*const m_pos;
const BoxDim m_gl_box;
const unsigned int m_Nslabs;
const unsigned int m_slab_index;
const Scalar3 m_invalid;
const flow_enum::Direction m_slab_direction;
__host__ __device__ Scalar3 operator()(const Scalar3& a,const Scalar3& b)const
{
Scalar3 result = m_invalid;
//Early exit, if invalid args involved.
if( a.z == m_invalid.z )
return b;
if( b.z == m_invalid.z )
return a;
const unsigned int idx_a = m_rtag[__scalar_as_int(a.z)];
const unsigned int idx_b = m_rtag[__scalar_as_int(b.z)];
unsigned int index_a,index_b;
switch( m_slab_direction )
{
case flow_enum::X:
index_a = (m_pos[idx_a].x/m_gl_box.getL().x +.5) * m_Nslabs;
index_b = (m_pos[idx_b].x/m_gl_box.getL().x +.5) * m_Nslabs;
break;
case flow_enum::Y:
index_a = (m_pos[idx_a].y/m_gl_box.getL().y +.5) * m_Nslabs;
index_b = (m_pos[idx_b].y/m_gl_box.getL().y +.5) * m_Nslabs;
break;
case flow_enum::Z:
index_a = (m_pos[idx_a].z/m_gl_box.getL().z +.5) * m_Nslabs;
index_b = (m_pos[idx_b].z/m_gl_box.getL().z +.5) * m_Nslabs;
break;
}
index_a %= m_Nslabs;
index_b %= m_Nslabs;
if( index_a == index_b)
{
if( index_a == m_slab_index )
{
CMP cmp;
if( cmp(a.x,b.x) )
result = a;
else
result = b;
}
}
else
{
if( index_a == m_slab_index )
result = a;
if( index_b == m_slab_index )
result = b;
}
return result;
}
};
hipError_t gpu_search_min_max_velocity(const unsigned int group_size,
const Scalar4*const d_vel,
const Scalar4*const d_pos,
const unsigned int *const d_tag,
const unsigned int *const d_rtag,
const unsigned int *const d_group_members,
const BoxDim gl_box,
const unsigned int Nslabs,
const unsigned int max_slab,
const unsigned int min_slab,
Scalar3*const last_max_vel,
Scalar3*const last_min_vel,
const bool has_max_slab,
const bool has_min_slab,
const unsigned int blocksize,
const flow_enum::Direction flow_direction,
const flow_enum::Direction slab_direction)
{
thrust::device_ptr<const unsigned int> member_ptr(d_group_members);
vel_search_un_opt un_opt(d_vel, d_tag,flow_direction);
if( has_max_slab )
{
vel_search_binary_opt<thrust::greater<const Scalar> > max_bin_opt(
d_rtag,d_pos,gl_box,Nslabs,max_slab,*last_max_vel,slab_direction);
Scalar3 init = *last_max_vel;
*last_max_vel = thrust::transform_reduce(member_ptr,member_ptr+group_size,
un_opt,init,max_bin_opt);
}
if( has_min_slab )
{
vel_search_binary_opt<thrust::less<const Scalar> > min_bin_opt(
d_rtag,d_pos,gl_box,Nslabs,min_slab,*last_min_vel,slab_direction);
Scalar3 init = *last_min_vel;
*last_min_vel = thrust::transform_reduce(member_ptr,member_ptr+group_size,
un_opt,init,min_bin_opt);
}
return hipPeekAtLastError();
}
void __global__ gpu_update_min_max_velocity_kernel(const unsigned int *const d_rtag,
Scalar4*const d_vel,
const unsigned int Ntotal,
const Scalar3 last_max_vel,
const Scalar3 last_min_vel,
const flow_enum::Direction flow_direction)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= 1)
return;
const unsigned int min_tag = __scalar_as_int( last_min_vel.z);
const unsigned int min_idx = d_rtag[min_tag];
const unsigned int max_tag = __scalar_as_int( last_max_vel.z);
const unsigned int max_idx = d_rtag[max_tag];
//Is the particle local on the processor?
//Swap the particles the new velocities.
if( min_idx < Ntotal)
{
const Scalar new_min_vel = last_max_vel.x / last_min_vel.y;
switch( flow_direction )
{
case flow_enum::X:
d_vel[min_idx].x = new_min_vel;
break;
case flow_enum::Y:
d_vel[min_idx].y = new_min_vel;
break;
case flow_enum::Z:
d_vel[min_idx].z = new_min_vel;
break;
}
}
if( max_idx < Ntotal)
{
const Scalar new_max_vel = last_min_vel.x / last_max_vel.y;
switch( flow_direction)
{
case flow_enum::X:
d_vel[max_idx].x = new_max_vel;
break;
case flow_enum::Y:
d_vel[max_idx].y = new_max_vel;
break;
case flow_enum::Z:
d_vel[max_idx].z = new_max_vel;
break;
}
}
}
hipError_t gpu_update_min_max_velocity(const unsigned int *const d_rtag,
Scalar4*const d_vel,
const unsigned int Ntotal,
const Scalar3 last_max_vel,
const Scalar3 last_min_vel,
const flow_enum::Direction flow_direction)
{
dim3 grid( 1, 1, 1);
dim3 threads(1, 1, 1);
hipLaunchKernelGGL(( gpu_update_min_max_velocity_kernel), dim3(grid),dim3(threads), 0, 0, d_rtag, d_vel, Ntotal,last_max_vel,
last_min_vel, flow_direction);
return hipPeekAtLastError();
}
| 494801376d44a650bee445b295893bf3c7337dda.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "hoomd/HOOMDMath.h"
#include "MuellerPlatheFlow.h"
#include "MuellerPlatheFlowGPU.h"
#include "MuellerPlatheFlowGPU.cuh"
#include <assert.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
struct vel_search_un_opt : public thrust::unary_function< const unsigned int,Scalar3>
{
vel_search_un_opt(const Scalar4*const d_vel,const unsigned int *const d_tag,flow_enum::Direction flow_direction)
:
m_vel(d_vel),
m_tag(d_tag),
m_flow_direction(flow_direction)
{}
const Scalar4*const m_vel;
const unsigned int*const m_tag;
const flow_enum::Direction m_flow_direction;
__host__ __device__ Scalar3 operator()(const unsigned int idx)const
{
const unsigned int tag = m_tag[idx];
Scalar vel;
switch( m_flow_direction )
{
case flow_enum::X: vel = m_vel[idx].x; break;
case flow_enum::Y: vel = m_vel[idx].y; break;
case flow_enum::Z: vel = m_vel[idx].z; break;
}
const Scalar mass = m_vel[idx].w;
vel *= mass;
Scalar3 result;
result.x = vel;
result.y = mass;
result.z = __int_as_scalar(tag);
return result;
}
};
template <typename CMP>
struct vel_search_binary_opt : public thrust::binary_function< Scalar3, Scalar3, Scalar3 >
{
vel_search_binary_opt(const unsigned int*const d_rtag,
const Scalar4*const d_pos,
const BoxDim gl_box,
const unsigned int Nslabs,
const unsigned int slab_index,
const Scalar3 invalid,
const flow_enum::Direction slab_direction)
: m_rtag(d_rtag),
m_pos(d_pos),
m_gl_box(gl_box),
m_Nslabs(Nslabs),
m_slab_index(slab_index),
m_invalid(invalid),
m_slab_direction(slab_direction)
{}
const unsigned int*const m_rtag;
const Scalar4*const m_pos;
const BoxDim m_gl_box;
const unsigned int m_Nslabs;
const unsigned int m_slab_index;
const Scalar3 m_invalid;
const flow_enum::Direction m_slab_direction;
__host__ __device__ Scalar3 operator()(const Scalar3& a,const Scalar3& b)const
{
Scalar3 result = m_invalid;
//Early exit, if invalid args involved.
if( a.z == m_invalid.z )
return b;
if( b.z == m_invalid.z )
return a;
const unsigned int idx_a = m_rtag[__scalar_as_int(a.z)];
const unsigned int idx_b = m_rtag[__scalar_as_int(b.z)];
unsigned int index_a,index_b;
switch( m_slab_direction )
{
case flow_enum::X:
index_a = (m_pos[idx_a].x/m_gl_box.getL().x +.5) * m_Nslabs;
index_b = (m_pos[idx_b].x/m_gl_box.getL().x +.5) * m_Nslabs;
break;
case flow_enum::Y:
index_a = (m_pos[idx_a].y/m_gl_box.getL().y +.5) * m_Nslabs;
index_b = (m_pos[idx_b].y/m_gl_box.getL().y +.5) * m_Nslabs;
break;
case flow_enum::Z:
index_a = (m_pos[idx_a].z/m_gl_box.getL().z +.5) * m_Nslabs;
index_b = (m_pos[idx_b].z/m_gl_box.getL().z +.5) * m_Nslabs;
break;
}
index_a %= m_Nslabs;
index_b %= m_Nslabs;
if( index_a == index_b)
{
if( index_a == m_slab_index )
{
CMP cmp;
if( cmp(a.x,b.x) )
result = a;
else
result = b;
}
}
else
{
if( index_a == m_slab_index )
result = a;
if( index_b == m_slab_index )
result = b;
}
return result;
}
};
cudaError_t gpu_search_min_max_velocity(const unsigned int group_size,
const Scalar4*const d_vel,
const Scalar4*const d_pos,
const unsigned int *const d_tag,
const unsigned int *const d_rtag,
const unsigned int *const d_group_members,
const BoxDim gl_box,
const unsigned int Nslabs,
const unsigned int max_slab,
const unsigned int min_slab,
Scalar3*const last_max_vel,
Scalar3*const last_min_vel,
const bool has_max_slab,
const bool has_min_slab,
const unsigned int blocksize,
const flow_enum::Direction flow_direction,
const flow_enum::Direction slab_direction)
{
thrust::device_ptr<const unsigned int> member_ptr(d_group_members);
vel_search_un_opt un_opt(d_vel, d_tag,flow_direction);
if( has_max_slab )
{
vel_search_binary_opt<thrust::greater<const Scalar> > max_bin_opt(
d_rtag,d_pos,gl_box,Nslabs,max_slab,*last_max_vel,slab_direction);
Scalar3 init = *last_max_vel;
*last_max_vel = thrust::transform_reduce(member_ptr,member_ptr+group_size,
un_opt,init,max_bin_opt);
}
if( has_min_slab )
{
vel_search_binary_opt<thrust::less<const Scalar> > min_bin_opt(
d_rtag,d_pos,gl_box,Nslabs,min_slab,*last_min_vel,slab_direction);
Scalar3 init = *last_min_vel;
*last_min_vel = thrust::transform_reduce(member_ptr,member_ptr+group_size,
un_opt,init,min_bin_opt);
}
return cudaPeekAtLastError();
}
void __global__ gpu_update_min_max_velocity_kernel(const unsigned int *const d_rtag,
Scalar4*const d_vel,
const unsigned int Ntotal,
const Scalar3 last_max_vel,
const Scalar3 last_min_vel,
const flow_enum::Direction flow_direction)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= 1)
return;
const unsigned int min_tag = __scalar_as_int( last_min_vel.z);
const unsigned int min_idx = d_rtag[min_tag];
const unsigned int max_tag = __scalar_as_int( last_max_vel.z);
const unsigned int max_idx = d_rtag[max_tag];
//Is the particle local on the processor?
//Swap the particles the new velocities.
if( min_idx < Ntotal)
{
const Scalar new_min_vel = last_max_vel.x / last_min_vel.y;
switch( flow_direction )
{
case flow_enum::X:
d_vel[min_idx].x = new_min_vel;
break;
case flow_enum::Y:
d_vel[min_idx].y = new_min_vel;
break;
case flow_enum::Z:
d_vel[min_idx].z = new_min_vel;
break;
}
}
if( max_idx < Ntotal)
{
const Scalar new_max_vel = last_min_vel.x / last_max_vel.y;
switch( flow_direction)
{
case flow_enum::X:
d_vel[max_idx].x = new_max_vel;
break;
case flow_enum::Y:
d_vel[max_idx].y = new_max_vel;
break;
case flow_enum::Z:
d_vel[max_idx].z = new_max_vel;
break;
}
}
}
cudaError_t gpu_update_min_max_velocity(const unsigned int *const d_rtag,
Scalar4*const d_vel,
const unsigned int Ntotal,
const Scalar3 last_max_vel,
const Scalar3 last_min_vel,
const flow_enum::Direction flow_direction)
{
dim3 grid( 1, 1, 1);
dim3 threads(1, 1, 1);
gpu_update_min_max_velocity_kernel<<<grid,threads>>>(d_rtag, d_vel, Ntotal,last_max_vel,
last_min_vel, flow_direction);
return cudaPeekAtLastError();
}
|
23339e7b1da24b9bd944c913f81d4607011cec25.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
__global__ void simple_kernel()
{
printf("hello from the kernel \n");
}
//int main(int argc, char ** argv)
//{
// int dev = 0;
// hipDeviceProp_t deviceProp;
// hipGetDeviceProperties(&deviceProp, dev);
//
// if (deviceProp.concurrentKernels == 0)
// {
// printf("> GPU does not support concurrent kernel execution \n");
// printf("kernel execution will be serialized \n");
// }
//
// hipStream_t str1, str2, str3;
//
// hipStreamCreate(&str1);
// hipStreamCreate(&str2);
// hipStreamCreate(&str3);
//
// simple_kernel << <1, 1, 0, str1 >> > ();
// simple_kernel << <1, 1, 0, str2 >> > ();
// simple_kernel << <1, 1, 0, str3 >> > ();
//
// hipStreamDestroy(str1);
// hipStreamDestroy(str2);
// hipStreamDestroy(str3);
//
// hipDeviceSynchronize();
// hipDeviceReset();
// return 0;
//} | 23339e7b1da24b9bd944c913f81d4607011cec25.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
// Minimal demo kernel: every launched thread prints one message from the
// device (used by the commented-out multi-stream example in this file).
__global__ void simple_kernel()
{
	printf("hello from the kernel \n");
}
//int main(int argc, char ** argv)
//{
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaGetDeviceProperties(&deviceProp, dev);
//
// if (deviceProp.concurrentKernels == 0)
// {
// printf("> GPU does not support concurrent kernel execution \n");
// printf("kernel execution will be serialized \n");
// }
//
// cudaStream_t str1, str2, str3;
//
// cudaStreamCreate(&str1);
// cudaStreamCreate(&str2);
// cudaStreamCreate(&str3);
//
// simple_kernel << <1, 1, 0, str1 >> > ();
// simple_kernel << <1, 1, 0, str2 >> > ();
// simple_kernel << <1, 1, 0, str3 >> > ();
//
// cudaStreamDestroy(str1);
// cudaStreamDestroy(str2);
// cudaStreamDestroy(str3);
//
// cudaDeviceSynchronize();
// cudaDeviceReset();
// return 0;
//} |
2b6ecf002c545be8bf64aa93d54bf15474ef23c3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include "binary_ops.hpp"
namespace cudf {
namespace experimental {
namespace binops {
namespace compiled {
namespace {
// Device-callable comparison functor. Applies the runtime-selected binary
// comparison operator `op` to an (Lhs, Rhs) pair and casts the boolean
// result to Out. Operators other than the six comparisons fall through to a
// default-constructed Out.
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop {
  binary_operator op;  // which comparison to perform, chosen at runtime
  apply_binop(binary_operator op) : op(op) {}
  // Dispatch on the stored operator; only comparison ops are handled here.
  CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x, Rhs const& y) const {
    switch (op) {
      case binary_operator::EQUAL: return this->equal(x, y);
      case binary_operator::NOT_EQUAL: return this->not_equal(x, y);
      case binary_operator::LESS: return this->less(x, y);
      case binary_operator::GREATER: return this->greater(x, y);
      case binary_operator::LESS_EQUAL: return this->less_equal(x, y);
      case binary_operator::GREATER_EQUAL: return this->greater_equal(x, y);
      default: return Out{};
    }
  }
  // One helper per comparison; each casts the bool result to Out.
  CUDA_DEVICE_CALLABLE Out equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x == y); }
  CUDA_DEVICE_CALLABLE Out not_equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x != y); }
  CUDA_DEVICE_CALLABLE Out less(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x < y); }
  CUDA_DEVICE_CALLABLE Out greater(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x > y); }
  CUDA_DEVICE_CALLABLE Out less_equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x <= y); }
  CUDA_DEVICE_CALLABLE Out greater_equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x >= y); }
};
// Unary adapter over apply_binop that binds the RIGHT operand to a
// device-side scalar: each column element x is evaluated as (x OP scalar).
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop_scalar_lhs_rhs : apply_binop<Lhs, Rhs, Out> {
  cudf::experimental::scalar_device_type_t<Rhs> scalar;  // bound RHS value
  apply_binop_scalar_lhs_rhs(binary_operator op, cudf::experimental::scalar_device_type_t<Rhs> scalar)
  : apply_binop<Lhs, Rhs, Out>(op), scalar(scalar) {}
  CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const {
    return apply_binop<Lhs, Rhs, Out>::operator()(x, scalar.value());
  }
};
// Unary adapter over apply_binop that binds the LEFT operand to a
// device-side scalar: each column element x is evaluated as (scalar OP x).
// Used for the reversed (scalar-on-the-left) entry point.
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop_scalar_rhs_lhs : apply_binop<Lhs, Rhs, Out> {
  cudf::experimental::scalar_device_type_t<Rhs> scalar;  // bound LHS value
  apply_binop_scalar_rhs_lhs(binary_operator op, cudf::experimental::scalar_device_type_t<Rhs> scalar)
  : apply_binop<Lhs, Rhs, Out>(op), scalar(scalar) {}
  CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const {
    return apply_binop<Lhs, Rhs, Out>::operator()(scalar.value(), x);
  }
};
// Compiled binary-op dispatcher. The first operator() handles
// column-vs-scalar (with optional operand reversal), the second handles
// column-vs-column. Both allocate the output column, choose an input
// iterator variant depending on null presence, and run a thrust::transform
// on the caller's stream.
template <typename Lhs, typename Rhs, typename Out>
struct binary_op {
  // column OP scalar. `reversed` selects (scalar OP element) instead of
  // (element OP scalar). Output null mask = lhs mask AND scalar validity;
  // if the scalar is invalid every row is null and no kernel is launched.
  std::unique_ptr<column> operator()(column_view const& lhs, scalar const& rhs, binary_operator op, data_type out_type, bool const reversed, rmm::mr::device_memory_resource* mr, hipStream_t stream) {
    auto new_mask = binops::detail::scalar_col_valid_mask_and(lhs, rhs, stream, mr);
    // Invalid scalar => null count is the full column size; otherwise defer.
    auto out = make_fixed_width_column(out_type, lhs.size(), new_mask,
                rhs.is_valid(stream) ? cudf::UNKNOWN_NULL_COUNT : lhs.size(), stream, mr);

    if (lhs.size() > 0 && rhs.is_valid(stream)) {
      auto out_view = out->mutable_view();
      auto out_itr = out_view.begin<Out>();
      auto lhs_device_view = column_device_view::create(lhs, stream);
      auto rhs_scalar = static_cast<cudf::experimental::scalar_type_t<Rhs> const&>(rhs);
      auto rhs_scalar_view = get_scalar_device_view(rhs_scalar);
      if (lhs.has_nulls()) {
        // Null elements are replaced by Lhs{}; the mask keeps them null.
        auto lhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
        reversed ?
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) :
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) ;
      } else {
        // No nulls: read elements directly through a counting iterator.
        auto lhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*lhs_device_view] __device__ (size_type i) { return col.element<Lhs>(i); });
        reversed ?
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) :
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) ;
      }
    }
    CHECK_CUDA(stream);
    return out;
  }
  // column OP column. Output null mask is the AND of both input masks.
  // Four launch variants cover each nulls/no-nulls combination.
  std::unique_ptr<column> operator()(column_view const& lhs, column_view const& rhs, binary_operator op, data_type out_type, rmm::mr::device_memory_resource* mr, hipStream_t stream) {
    auto new_mask = bitmask_and(table_view({lhs, rhs}), mr, stream);
    auto out = make_fixed_width_column(out_type, lhs.size(), new_mask,
                cudf::UNKNOWN_NULL_COUNT, stream, mr);

    if (lhs.size() > 0) {
      auto out_view = out->mutable_view();
      auto out_itr = out_view.begin<Out>();
      auto lhs_device_view = column_device_view::create(lhs, stream);
      auto rhs_device_view = column_device_view::create(rhs, stream);
      if (lhs.has_nulls() && rhs.has_nulls()) {
        auto lhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
        auto rhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      } else if (lhs.has_nulls()) {
        auto lhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
        auto rhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*rhs_device_view] __device__ (size_type i) { return col.element<Rhs>(i); });
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      } else if (rhs.has_nulls()) {
        auto lhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*lhs_device_view] __device__ (size_type i) { return col.element<Lhs>(i); });
        auto rhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      } else {
        auto lhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*lhs_device_view] __device__ (size_type i) { return col.element<Lhs>(i); });
        auto rhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*rhs_device_view] __device__ (size_type i) { return col.element<Rhs>(i); });
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      }
    }
    CHECK_CUDA(stream);
    return out;
  }
};
} // namespace
// scalar OP column entry point (string comparisons only). Delegates to the
// column-vs-scalar path with reversed=true so the scalar acts as the LEFT
// operand inside the kernel.
std::unique_ptr<column> binary_operation(scalar const& lhs, column_view const& rhs, binary_operator op, data_type output_type, rmm::mr::device_memory_resource* mr, hipStream_t stream) {
  // hard-coded to only work with cudf::string_view so we don't explode compile times
  CUDF_EXPECTS(lhs.type().id() == cudf::STRING, "Invalid/Unsupported lhs datatype");
  CUDF_EXPECTS(rhs.type().id() == cudf::STRING, "Invalid/Unsupported rhs datatype");
  CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
  return binary_op<cudf::string_view, cudf::string_view, cudf::experimental::bool8>{}(rhs, lhs, op, output_type, true, mr, stream);
}
// column OP scalar entry point (string comparisons only); reversed=false
// keeps the column as the left operand.
std::unique_ptr<column> binary_operation(column_view const& lhs, scalar const& rhs, binary_operator op, data_type output_type, rmm::mr::device_memory_resource* mr, hipStream_t stream) {
  // hard-coded to only work with cudf::string_view so we don't explode compile times
  CUDF_EXPECTS(lhs.type().id() == cudf::STRING, "Invalid/Unsupported lhs datatype");
  CUDF_EXPECTS(rhs.type().id() == cudf::STRING, "Invalid/Unsupported rhs datatype");
  CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
  return binary_op<cudf::string_view, cudf::string_view, cudf::experimental::bool8>{}(lhs, rhs, op, output_type, false, mr, stream);
}
// column OP column entry point (string comparisons only); uses the
// column-vs-column overload of binary_op.
std::unique_ptr<column> binary_operation(column_view const& lhs, column_view const& rhs, binary_operator op, data_type output_type, rmm::mr::device_memory_resource* mr, hipStream_t stream) {
  // hard-coded to only work with cudf::string_view so we don't explode compile times
  CUDF_EXPECTS(lhs.type().id() == cudf::STRING, "Invalid/Unsupported lhs datatype");
  CUDF_EXPECTS(rhs.type().id() == cudf::STRING, "Invalid/Unsupported rhs datatype");
  CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
  return binary_op<cudf::string_view, cudf::string_view, cudf::experimental::bool8>{}(lhs, rhs, op, output_type, mr, stream);
}
} // namespace compiled
} // namespace binops
} // namespace experimental
} // namespace cudf
| 2b6ecf002c545be8bf64aa93d54bf15474ef23c3.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include "binary_ops.hpp"
namespace cudf {
namespace experimental {
namespace binops {
namespace compiled {
namespace {
// Device-callable comparison functor. Applies the runtime-selected binary
// comparison operator `op` to an (Lhs, Rhs) pair and casts the boolean
// result to Out. Operators other than the six comparisons fall through to a
// default-constructed Out.
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop {
  binary_operator op;  // which comparison to perform, chosen at runtime
  apply_binop(binary_operator op) : op(op) {}
  // Dispatch on the stored operator; only comparison ops are handled here.
  CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x, Rhs const& y) const {
    switch (op) {
      case binary_operator::EQUAL: return this->equal(x, y);
      case binary_operator::NOT_EQUAL: return this->not_equal(x, y);
      case binary_operator::LESS: return this->less(x, y);
      case binary_operator::GREATER: return this->greater(x, y);
      case binary_operator::LESS_EQUAL: return this->less_equal(x, y);
      case binary_operator::GREATER_EQUAL: return this->greater_equal(x, y);
      default: return Out{};
    }
  }
  // One helper per comparison; each casts the bool result to Out.
  CUDA_DEVICE_CALLABLE Out equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x == y); }
  CUDA_DEVICE_CALLABLE Out not_equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x != y); }
  CUDA_DEVICE_CALLABLE Out less(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x < y); }
  CUDA_DEVICE_CALLABLE Out greater(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x > y); }
  CUDA_DEVICE_CALLABLE Out less_equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x <= y); }
  CUDA_DEVICE_CALLABLE Out greater_equal(Lhs const& x, Rhs const& y) const { return static_cast<Out>(x >= y); }
};
// Unary adapter over apply_binop that binds the RIGHT operand to a
// device-side scalar: each column element x is evaluated as (x OP scalar).
template <typename Lhs, typename Rhs, typename Out>
struct apply_binop_scalar_lhs_rhs : apply_binop<Lhs, Rhs, Out> {
  cudf::experimental::scalar_device_type_t<Rhs> scalar;  // bound RHS value
  apply_binop_scalar_lhs_rhs(binary_operator op, cudf::experimental::scalar_device_type_t<Rhs> scalar)
  : apply_binop<Lhs, Rhs, Out>(op), scalar(scalar) {}
  CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const {
    return apply_binop<Lhs, Rhs, Out>::operator()(x, scalar.value());
  }
};
struct apply_binop_scalar_rhs_lhs : apply_binop<Lhs, Rhs, Out> {
cudf::experimental::scalar_device_type_t<Rhs> scalar;
apply_binop_scalar_rhs_lhs(binary_operator op, cudf::experimental::scalar_device_type_t<Rhs> scalar)
: apply_binop<Lhs, Rhs, Out>(op), scalar(scalar) {}
CUDA_DEVICE_CALLABLE Out operator()(Lhs const& x) const {
return apply_binop<Lhs, Rhs, Out>::operator()(scalar.value(), x);
}
};
// Compiled binary-op dispatcher. The first operator() handles
// column-vs-scalar (with optional operand reversal), the second handles
// column-vs-column. Both allocate the output column, choose an input
// iterator variant depending on null presence, and run a thrust::transform
// on the caller's stream.
template <typename Lhs, typename Rhs, typename Out>
struct binary_op {
  // column OP scalar. `reversed` selects (scalar OP element) instead of
  // (element OP scalar). Output null mask = lhs mask AND scalar validity;
  // if the scalar is invalid every row is null and no kernel is launched.
  std::unique_ptr<column> operator()(column_view const& lhs, scalar const& rhs, binary_operator op, data_type out_type, bool const reversed, rmm::mr::device_memory_resource* mr, cudaStream_t stream) {
    auto new_mask = binops::detail::scalar_col_valid_mask_and(lhs, rhs, stream, mr);
    // Invalid scalar => null count is the full column size; otherwise defer.
    auto out = make_fixed_width_column(out_type, lhs.size(), new_mask,
                rhs.is_valid(stream) ? cudf::UNKNOWN_NULL_COUNT : lhs.size(), stream, mr);

    if (lhs.size() > 0 && rhs.is_valid(stream)) {
      auto out_view = out->mutable_view();
      auto out_itr = out_view.begin<Out>();
      auto lhs_device_view = column_device_view::create(lhs, stream);
      auto rhs_scalar = static_cast<cudf::experimental::scalar_type_t<Rhs> const&>(rhs);
      auto rhs_scalar_view = get_scalar_device_view(rhs_scalar);
      if (lhs.has_nulls()) {
        // Null elements are replaced by Lhs{}; the mask keeps them null.
        auto lhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
        reversed ?
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) :
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) ;
      } else {
        // No nulls: read elements directly through a counting iterator.
        auto lhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*lhs_device_view] __device__ (size_type i) { return col.element<Lhs>(i); });
        reversed ?
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_rhs_lhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) :
          thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), out_itr, apply_binop_scalar_lhs_rhs<Lhs, Rhs, Out>{op, rhs_scalar_view}) ;
      }
    }
    CHECK_CUDA(stream);
    return out;
  }
  // column OP column. Output null mask is the AND of both input masks.
  // Four launch variants cover each nulls/no-nulls combination.
  std::unique_ptr<column> operator()(column_view const& lhs, column_view const& rhs, binary_operator op, data_type out_type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) {
    auto new_mask = bitmask_and(table_view({lhs, rhs}), mr, stream);
    auto out = make_fixed_width_column(out_type, lhs.size(), new_mask,
                cudf::UNKNOWN_NULL_COUNT, stream, mr);

    if (lhs.size() > 0) {
      auto out_view = out->mutable_view();
      auto out_itr = out_view.begin<Out>();
      auto lhs_device_view = column_device_view::create(lhs, stream);
      auto rhs_device_view = column_device_view::create(rhs, stream);
      if (lhs.has_nulls() && rhs.has_nulls()) {
        auto lhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
        auto rhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      } else if (lhs.has_nulls()) {
        auto lhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*lhs_device_view, Lhs{});
        auto rhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*rhs_device_view] __device__ (size_type i) { return col.element<Rhs>(i); });
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      } else if (rhs.has_nulls()) {
        auto lhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*lhs_device_view] __device__ (size_type i) { return col.element<Lhs>(i); });
        auto rhs_itr = cudf::experimental::detail::make_null_replacement_iterator(*rhs_device_view, Rhs{});
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      } else {
        auto lhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*lhs_device_view] __device__ (size_type i) { return col.element<Lhs>(i); });
        auto rhs_itr = thrust::make_transform_iterator(thrust::make_counting_iterator(size_type{0}),
          [col=*rhs_device_view] __device__ (size_type i) { return col.element<Rhs>(i); });
        thrust::transform(rmm::exec_policy(stream)->on(stream), lhs_itr, lhs_itr + lhs.size(), rhs_itr, out_itr, apply_binop<Lhs, Rhs, Out>{op});
      }
    }
    CHECK_CUDA(stream);
    return out;
  }
};
} // namespace
// scalar OP column entry point (string comparisons only). Delegates to the
// column-vs-scalar path with reversed=true so the scalar acts as the LEFT
// operand inside the kernel.
std::unique_ptr<column> binary_operation(scalar const& lhs, column_view const& rhs, binary_operator op, data_type output_type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) {
  // hard-coded to only work with cudf::string_view so we don't explode compile times
  CUDF_EXPECTS(lhs.type().id() == cudf::STRING, "Invalid/Unsupported lhs datatype");
  CUDF_EXPECTS(rhs.type().id() == cudf::STRING, "Invalid/Unsupported rhs datatype");
  CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
  return binary_op<cudf::string_view, cudf::string_view, cudf::experimental::bool8>{}(rhs, lhs, op, output_type, true, mr, stream);
}
// column OP scalar entry point (string comparisons only); reversed=false
// keeps the column as the left operand.
std::unique_ptr<column> binary_operation(column_view const& lhs, scalar const& rhs, binary_operator op, data_type output_type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) {
  // hard-coded to only work with cudf::string_view so we don't explode compile times
  CUDF_EXPECTS(lhs.type().id() == cudf::STRING, "Invalid/Unsupported lhs datatype");
  CUDF_EXPECTS(rhs.type().id() == cudf::STRING, "Invalid/Unsupported rhs datatype");
  CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
  return binary_op<cudf::string_view, cudf::string_view, cudf::experimental::bool8>{}(lhs, rhs, op, output_type, false, mr, stream);
}
// column OP column entry point (string comparisons only); uses the
// column-vs-column overload of binary_op.
std::unique_ptr<column> binary_operation(column_view const& lhs, column_view const& rhs, binary_operator op, data_type output_type, rmm::mr::device_memory_resource* mr, cudaStream_t stream) {
  // hard-coded to only work with cudf::string_view so we don't explode compile times
  CUDF_EXPECTS(lhs.type().id() == cudf::STRING, "Invalid/Unsupported lhs datatype");
  CUDF_EXPECTS(rhs.type().id() == cudf::STRING, "Invalid/Unsupported rhs datatype");
  CUDF_EXPECTS(is_boolean(output_type), "Invalid/Unsupported output datatype");
  return binary_op<cudf::string_view, cudf::string_view, cudf::experimental::bool8>{}(lhs, rhs, op, output_type, mr, stream);
}
} // namespace compiled
} // namespace binops
} // namespace experimental
} // namespace cudf
|
aca1a3b8b716f183eb8c4601a6cec21dafe7b944.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Writes `number` into the first element of device buffer `c`.
// Launched with a single thread (see main below).
__global__ void set_value(int *c, int number)
{
	c[0] = number;
}
// Two-GPU demo: allocate one int on each device, set distinct values with a
// one-thread kernel per device, then copy device 1's value into device 0's
// buffer with hipMemcpyPeer, printing both values before and after.
// Fixes vs. original: (1) the second H2D copy wrote to dev0_data again,
// leaving dev1_data uninitialized; (2) '&current_data' had been corrupted by
// HTML-entity decoding into '¤t_data' (would not compile); (3) each free is
// issued with its owning device current.
int main()
{
	int *dev0_data;
	int *dev1_data;
	int current_data = 0;

	hipSetDevice(0);
	hipMalloc((void**)&dev0_data, 1 * sizeof(int));
	hipMemcpy(dev0_data, &current_data, 1 * sizeof(int), hipMemcpyHostToDevice);

	hipSetDevice(1);
	hipMalloc((void**)&dev1_data, 1 * sizeof(int));
	// BUG FIX: initialize the device-1 buffer (original copied to dev0_data).
	hipMemcpy(dev1_data, &current_data, 1 * sizeof(int), hipMemcpyHostToDevice);
	printf("Memoriy allocated...\n");

	hipSetDevice(0);
	set_value << <1, 1 >> > (dev0_data, 1);
	hipDeviceSynchronize();
	hipSetDevice(1);
	set_value << <1, 1 >> > (dev1_data, 9);
	hipDeviceSynchronize();
	printf("Kernels ok\n");

	hipSetDevice(0);
	hipMemcpy(&current_data, dev0_data, 1 * sizeof(int), hipMemcpyDeviceToHost);
	printf("DEV0: %i\n", current_data);
	hipSetDevice(1);
	hipMemcpy(&current_data, dev1_data, 1 * sizeof(int), hipMemcpyDeviceToHost);
	printf("DEV1: %i\n", current_data);

	// Pull device 1's value into device 0's buffer: (dst, dstDev, src, srcDev).
	hipMemcpyPeer(dev0_data, 0, dev1_data, 1, 1 * sizeof(int));
	printf("Swap ok\n");

	hipSetDevice(0);
	hipMemcpy(&current_data, dev0_data, 1 * sizeof(int), hipMemcpyDeviceToHost);
	printf("DEV0: %i\n", current_data);
	hipSetDevice(1);
	hipMemcpy(&current_data, dev1_data, 1 * sizeof(int), hipMemcpyDeviceToHost);
	printf("DEV1: %i\n", current_data);

	// Free each allocation with its owning device current.
	hipSetDevice(0);
	hipFree(dev0_data);
	hipSetDevice(1);
	hipFree(dev1_data);
	return 0;
}
| aca1a3b8b716f183eb8c4601a6cec21dafe7b944.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Writes `number` into the first element of device buffer `c`.
// Launched with a single thread (see main below).
__global__ void set_value(int *c, int number)
{
	c[0] = number;
}
// Two-GPU demo: allocate one int on each device, set distinct values with a
// one-thread kernel per device, then copy device 1's value into device 0's
// buffer with cudaMemcpyPeer, printing both values before and after.
// Fixes vs. original: (1) the second H2D copy wrote to dev0_data again,
// leaving dev1_data uninitialized; (2) '&current_data' had been corrupted by
// HTML-entity decoding into '¤t_data' (would not compile); (3) each free is
// issued with its owning device current.
int main()
{
	int *dev0_data;
	int *dev1_data;
	int current_data = 0;

	cudaSetDevice(0);
	cudaMalloc((void**)&dev0_data, 1 * sizeof(int));
	cudaMemcpy(dev0_data, &current_data, 1 * sizeof(int), cudaMemcpyHostToDevice);

	cudaSetDevice(1);
	cudaMalloc((void**)&dev1_data, 1 * sizeof(int));
	// BUG FIX: initialize the device-1 buffer (original copied to dev0_data).
	cudaMemcpy(dev1_data, &current_data, 1 * sizeof(int), cudaMemcpyHostToDevice);
	printf("Memoriy allocated...\n");

	cudaSetDevice(0);
	set_value << <1, 1 >> > (dev0_data, 1);
	cudaDeviceSynchronize();
	cudaSetDevice(1);
	set_value << <1, 1 >> > (dev1_data, 9);
	cudaDeviceSynchronize();
	printf("Kernels… ok\n");

	cudaSetDevice(0);
	cudaMemcpy(&current_data, dev0_data, 1 * sizeof(int), cudaMemcpyDeviceToHost);
	printf("DEV0: %i\n", current_data);
	cudaSetDevice(1);
	cudaMemcpy(&current_data, dev1_data, 1 * sizeof(int), cudaMemcpyDeviceToHost);
	printf("DEV1: %i\n", current_data);

	// Pull device 1's value into device 0's buffer: (dst, dstDev, src, srcDev).
	cudaMemcpyPeer(dev0_data, 0, dev1_data, 1, 1 * sizeof(int));
	printf("Swap… ok\n");

	cudaSetDevice(0);
	cudaMemcpy(&current_data, dev0_data, 1 * sizeof(int), cudaMemcpyDeviceToHost);
	printf("DEV0: %i\n", current_data);
	cudaSetDevice(1);
	cudaMemcpy(&current_data, dev1_data, 1 * sizeof(int), cudaMemcpyDeviceToHost);
	printf("DEV1: %i\n", current_data);

	// Free each allocation with its owning device current.
	cudaSetDevice(0);
	cudaFree(dev0_data);
	cudaSetDevice(1);
	cudaFree(dev1_data);
	return 0;
}
|
367ffac48c53a71627bfbd07be1cfa64aad9a6d1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* asyncAPI.cu
*
* Created on: Sep 3, 2014
* Author: chunk
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "util.h"
// Element-wise vector add: thread i computes C[i] = A[i] + B[i].
// NOTE(review): indexes with threadIdx.x only — assumes a single-block
// launch with one thread per element (as in the commented launch in main).
__global__ void vecAdd(int* A, int* B, int* C) {
	int i = threadIdx.x;
	C[i] = A[i] + B[i];
}
// Prints `length` ints separated by spaces, framed by newlines.
void display(int *a, int length) {
	printf("\n");
	int idx = 0;
	while (idx < length) {
		printf("%d ", a[idx]);
		++idx;
	}
	printf("\n");
}
// Returns true iff the first `len` elements of `a` and `b` are pairwise
// equal (vacuously true for len == 0).
bool verify(int *a, int *b, int len) {
	int i = 0;
	while (i < len && a[i] == b[i]) {
		++i;
	}
	return i == len;
}
// Host driver: allocates pinned host and device buffers, fills the inputs
// and their expected sum, then exercises async H2D/D2H copies on multiple
// streams while timing with several timer utilities. The vecAdd launch is
// commented out, so h_C reads back the zeroed d_C and the final verify is
// expected to fail (preserved from the original).
// Fixes vs. original: hipMemset was given &d_A/&d_B/&d_C (the address of the
// host pointer variable) instead of the device pointer itself; streams and
// the sdk timer were never destroyed.
int main(int argc, char **argv) {
	int devId;
	hipDeviceProp_t devProp;
	devId = findCudaDevice(argc, (const char **) argv);
	hipGetDeviceProperties(&devProp, devId);
	printf("cuda device info : %d - [%s]\n", devId, devProp.name);

	const int n = 1024;
	const int nbyte = n * sizeof(int);

	int *h_A, *h_B, *h_C, *sum;
	int *d_A, *d_B, *d_C;

	// Pinned host memory enables truly asynchronous hipMemcpyAsync.
	hipHostMalloc(&h_A, nbyte);
	hipHostMalloc(&h_B, nbyte);
	hipHostMalloc(&h_C, nbyte);
//	h_C = (int *) malloc(nbyte);
	hipHostMalloc(&sum, nbyte);

	hipMalloc(&d_A, nbyte);
	hipMalloc(&d_B, nbyte);
	hipMalloc(&d_C, nbyte);
	// BUG FIX: pass the device pointer, not the address of the pointer variable.
	hipMemset(d_A, 0, nbyte);
	hipMemset(d_B, 0, nbyte);
	hipMemset(d_C, 0, nbyte);

	hipStream_t stream0, stream1, stream2;
	hipStreamCreate(&stream0);
	hipStreamCreate(&stream1);
	hipStreamCreate(&stream2);

	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);

	StopWatchInterface *timer = NULL;
	mTimer mtimer;
	cuTimer cutmer;
	sdkCreateTimer(&timer);

//	srand(time(0));
	// Fill inputs with pseudo-random values and precompute the expected sum.
	for (int i = 0; i < n; i++) {
		h_A[i] = (int) (1024.0 * rand() / (RAND_MAX + 1.0));
		h_B[i] = (int) (1024.0 * rand() / (RAND_MAX + 1.0));
		sum[i] = h_A[i] + h_B[i];
	}

	sdkStartTimer(&timer);
	mtimer.start();
	cutmer.start();
	/**
	 *
	 * ____
	 * ____|___ ___ ___
	 */
	hipEventRecord(start, stream0);
	// Upload the two inputs on independent streams so the copies may overlap.
	hipMemcpyAsync(d_A, h_A, nbyte, hipMemcpyHostToDevice, stream0);
	hipMemcpyAsync(d_B, h_B, nbyte, hipMemcpyHostToDevice, stream1);
//	hipStreamSynchronize(stream0);
//	hipStreamSynchronize(stream1);
//	vecAdd<<<1, n, 0, stream0>>>(d_A, d_B, d_C);
//	hipStreamSynchronize(stream0);
	display(h_C, n);
	checkCudaErrors(
			hipMemcpyAsync(h_C, d_C, nbyte, hipMemcpyDeviceToHost, stream0));
	hipStreamSynchronize(stream0);
	hipEventRecord(stop, stream0);
	display(h_C, n);

	cutmer.end();
	mtimer.end();
	sdkStopTimer(&timer);
	printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
	mtimer.showTime();
	cutmer.showTime();

	if (verify(h_C, sum, n))
		printf("Checking OK.\n");
	else
		printf("Checking Eroor!\n");

	hipEventDestroy(start);
	hipEventDestroy(stop);
	// Resource-leak fix: destroy the streams and the sdk timer.
	hipStreamDestroy(stream0);
	hipStreamDestroy(stream1);
	hipStreamDestroy(stream2);
	sdkDeleteTimer(&timer);
	hipHostFree(h_A);
	hipHostFree(h_B);
	hipHostFree(h_C);
//	free(h_C);
	hipHostFree(sum);
	hipFree(d_A);
	hipFree(d_B);
	checkCudaErrors(hipFree((void * )d_C));
	checkCudaErrors(hipDeviceReset());
}
| 367ffac48c53a71627bfbd07be1cfa64aad9a6d1.cu | /*
* asyncAPI.cu
*
* Created on: Sep 3, 2014
* Author: chunk
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "util.h"
// Element-wise vector add: thread i computes C[i] = A[i] + B[i].
// NOTE(review): indexes with threadIdx.x only — assumes a single-block
// launch with one thread per element (as in the commented launch in main).
__global__ void vecAdd(int* A, int* B, int* C) {
	int i = threadIdx.x;
	C[i] = A[i] + B[i];
}
// Prints `length` ints separated by spaces, framed by newlines.
void display(int *a, int length) {
	printf("\n");
	int idx = 0;
	while (idx < length) {
		printf("%d ", a[idx]);
		++idx;
	}
	printf("\n");
}
// Returns true iff the first `len` elements of `a` and `b` are pairwise
// equal (vacuously true for len == 0).
bool verify(int *a, int *b, int len) {
	int i = 0;
	while (i < len && a[i] == b[i]) {
		++i;
	}
	return i == len;
}
// Host driver: allocates pinned host and device buffers, fills the inputs
// and their expected sum, then exercises async H2D/D2H copies on multiple
// streams while timing with several timer utilities. The vecAdd launch is
// commented out, so h_C reads back the zeroed d_C and the final verify is
// expected to fail (preserved from the original).
// Fixes vs. original: cudaMemset was given &d_A/&d_B/&d_C (the address of
// the host pointer variable) instead of the device pointer itself; streams
// and the sdk timer were never destroyed.
int main(int argc, char **argv) {
	int devId;
	cudaDeviceProp devProp;
	devId = findCudaDevice(argc, (const char **) argv);
	cudaGetDeviceProperties(&devProp, devId);
	printf("cuda device info : %d - [%s]\n", devId, devProp.name);

	const int n = 1024;
	const int nbyte = n * sizeof(int);

	int *h_A, *h_B, *h_C, *sum;
	int *d_A, *d_B, *d_C;

	// Pinned host memory enables truly asynchronous cudaMemcpyAsync.
	cudaMallocHost(&h_A, nbyte);
	cudaMallocHost(&h_B, nbyte);
	cudaMallocHost(&h_C, nbyte);
//	h_C = (int *) malloc(nbyte);
	cudaMallocHost(&sum, nbyte);

	cudaMalloc(&d_A, nbyte);
	cudaMalloc(&d_B, nbyte);
	cudaMalloc(&d_C, nbyte);
	// BUG FIX: pass the device pointer, not the address of the pointer variable.
	cudaMemset(d_A, 0, nbyte);
	cudaMemset(d_B, 0, nbyte);
	cudaMemset(d_C, 0, nbyte);

	cudaStream_t stream0, stream1, stream2;
	cudaStreamCreate(&stream0);
	cudaStreamCreate(&stream1);
	cudaStreamCreate(&stream2);

	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	StopWatchInterface *timer = NULL;
	mTimer mtimer;
	cuTimer cutmer;
	sdkCreateTimer(&timer);

//	srand(time(0));
	// Fill inputs with pseudo-random values and precompute the expected sum.
	for (int i = 0; i < n; i++) {
		h_A[i] = (int) (1024.0 * rand() / (RAND_MAX + 1.0));
		h_B[i] = (int) (1024.0 * rand() / (RAND_MAX + 1.0));
		sum[i] = h_A[i] + h_B[i];
	}

	sdkStartTimer(&timer);
	mtimer.start();
	cutmer.start();
	/**
	 *
	 * ____
	 * ____|___ ___ ___
	 */
	cudaEventRecord(start, stream0);
	// Upload the two inputs on independent streams so the copies may overlap.
	cudaMemcpyAsync(d_A, h_A, nbyte, cudaMemcpyHostToDevice, stream0);
	cudaMemcpyAsync(d_B, h_B, nbyte, cudaMemcpyHostToDevice, stream1);
//	cudaStreamSynchronize(stream0);
//	cudaStreamSynchronize(stream1);
//	vecAdd<<<1, n, 0, stream0>>>(d_A, d_B, d_C);
//	cudaStreamSynchronize(stream0);
	display(h_C, n);
	checkCudaErrors(
			cudaMemcpyAsync(h_C, d_C, nbyte, cudaMemcpyDeviceToHost, stream0));
	cudaStreamSynchronize(stream0);
	cudaEventRecord(stop, stream0);
	display(h_C, n);

	cutmer.end();
	mtimer.end();
	sdkStopTimer(&timer);
	printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
	mtimer.showTime();
	cutmer.showTime();

	if (verify(h_C, sum, n))
		printf("Checking OK.\n");
	else
		printf("Checking Eroor!\n");

	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// Resource-leak fix: destroy the streams and the sdk timer.
	cudaStreamDestroy(stream0);
	cudaStreamDestroy(stream1);
	cudaStreamDestroy(stream2);
	sdkDeleteTimer(&timer);
	cudaFreeHost(h_A);
	cudaFreeHost(h_B);
	cudaFreeHost(h_C);
//	free(h_C);
	cudaFreeHost(sum);
	cudaFree(d_A);
	cudaFree(d_B);
	checkCudaErrors(cudaFree((void * )d_C));
	checkCudaErrors(cudaDeviceReset());
}
|
1ba9e2b6eb8cc694dfb2214e05488baab6502221.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// 2D convolution over an interleaved multi-channel, row-major image using
// global memory only. One thread produces one output pixel (all channels);
// taps falling outside the image contribute zero (zero padding).
// Fixes vs. original: (1) out-of-bounds taps reset the whole accumulator
// ('else { sum = 0; }'), discarding every term gathered so far and
// corrupting all border pixels; (2) the input index used `height` as the
// row stride — correct only for square images — now `width`; (3) added an
// output bounds guard for grids that overhang the image.
// (Closing brace of the kernel is retained on the following source line.)
__global__ void Convolution_2D_globalMemory(unsigned char* imgInput,unsigned char* imgOutput, const float* mask, int height, int width, int channels) {
	int rows = threadIdx.x + blockIdx.x * blockDim.x;  // output row index
	int cols = threadIdx.y + blockIdx.y * blockDim.y;  // output column index
	// Guard: the grid may be larger than the image.
	if (rows >= height || cols >= width) return;
	// Top-left corner of the mask window centred on (rows, cols).
	int Row = rows - MASK_WIDTH / 2;
	int Col = cols - MASK_WIDTH / 2;
	for (int c = 0; c < channels; c++)
	{
		float sum = 0.0f;
		for (int i = 0; i < MASK_WIDTH; i++)
		{
			for (int j = 0; j < MASK_WIDTH; j++)
			{
				int filterRow = Row + i;
				int filterCol = Col + j;
				// Only in-bounds taps contribute (zero padding).
				if ((filterRow >= 0) && (filterRow < height) && (filterCol >= 0) && (filterCol < width))
				{
					// Row-major stride is `width`, not `height`.
					sum += imgInput[(filterRow * width + filterCol) * channels + c] * mask[i * MASK_WIDTH + j];
				}
			}
		}
		imgOutput[(rows * width + cols) * channels + c] = (unsigned char)sum;
	}
} | 1ba9e2b6eb8cc694dfb2214e05488baab6502221.cu | #include "includes.h"
// 2D convolution over an interleaved multi-channel, row-major image using
// global memory only. One thread produces one output pixel (all channels);
// taps falling outside the image contribute zero (zero padding).
// Fixes vs. original: (1) out-of-bounds taps reset the whole accumulator
// ('else { sum = 0; }'), discarding every term gathered so far and
// corrupting all border pixels; (2) the input index used `height` as the
// row stride — correct only for square images — now `width`; (3) added an
// output bounds guard for grids that overhang the image.
__global__ void Convolution_2D_globalMemory(unsigned char* imgInput,unsigned char* imgOutput, const float* mask, int height, int width, int channels) {
	int rows = threadIdx.x + blockIdx.x * blockDim.x;  // output row index
	int cols = threadIdx.y + blockIdx.y * blockDim.y;  // output column index
	// Guard: the grid may be larger than the image.
	if (rows >= height || cols >= width) return;
	// Top-left corner of the mask window centred on (rows, cols).
	int Row = rows - MASK_WIDTH / 2;
	int Col = cols - MASK_WIDTH / 2;
	for (int c = 0; c < channels; c++)
	{
		float sum = 0.0f;
		for (int i = 0; i < MASK_WIDTH; i++)
		{
			for (int j = 0; j < MASK_WIDTH; j++)
			{
				int filterRow = Row + i;
				int filterCol = Col + j;
				// Only in-bounds taps contribute (zero padding).
				if ((filterRow >= 0) && (filterRow < height) && (filterCol >= 0) && (filterCol < width))
				{
					// Row-major stride is `width`, not `height`.
					sum += imgInput[(filterRow * width + filterCol) * channels + c] * mask[i * MASK_WIDTH + j];
				}
			}
		}
		imgOutput[(rows * width + cols) * channels + c] = (unsigned char)sum;
	}
}
4e24372fb5354cc7720bfe0986a4233aee4c779a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_dense_stereo.h"
#include <stdexcept>
#include "launch_utils.h"
#include "MatUtils.h"
#include "patch_score.h"
#include "disparity.h"
#include "InvalidValue.h"
#include "ImageApron.h"
namespace roo
{
const int MinDisparity = 0;
const int DefaultRad = 2;
//typedef SSNDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
typedef SANDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
//typedef SinglePixelSqPatchScore<float,ImgAccessRaw> DefaultSafeScoreType;
//////////////////////////////////////////////////////
// Cost Volume minimum
//////////////////////////////////////////////////////
// One thread per pixel: scan the cost volume along the disparity axis and
// write the disparity of minimal cost into disp(x,y). The search range is
// clamped to x+1 so candidate matches stay inside the reference image.
// Fix vs. original: added the same x/y bounds guard that the sibling
// KernCostVolMinimumSubpix kernel has — the launch wrapper rounds the grid
// up over the image (InitDimFromOutputImageOver), so edge blocks would
// otherwise index outside disp/vol.
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal)
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;

    if( x < disp.w && y < disp.h ) {
        Tdisp bestd = 0;
        Tvol bestc = vol(x,y,0);

        const int maxDisp = min(maxDispVal, x+1);
        for(int d=1; d < maxDisp; ++d) {
            const Tvol c = vol(x,y,d);
            if(c < bestc) {
                bestc = c;
                bestd = d;
            }
        }
        disp(x,y) = bestd;
    }
}
// Launch wrapper: derives a 2D launch configuration covering `disp`
// (rounded up — the kernel must bounds-check) and runs the cost-volume
// minimum kernel on the default stream. Hipified launch macro preserved.
template<typename Tdisp, typename Tvol>
void CostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDisp)
{
    dim3 blockDim, gridDim;
    InitDimFromOutputImageOver(blockDim,gridDim,disp);
    hipLaunchKernelGGL(( KernCostVolMinimum<Tdisp,Tvol>), dim3(gridDim),dim3(blockDim), 0, 0, disp,vol,maxDisp);
}
template void CostVolMinimum<>(Image<char>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned short>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned char>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<unsigned short>,unsigned);
//////////////////////////////////////////////////////
// Cost Volume minimum subpix refinement
//////////////////////////////////////////////////////
// Winner-takes-all disparity with parabolic sub-pixel refinement.
// sd is the sign/step applied to the disparity when mapping to the other
// view's column (x + sd*d); maxDispVal is the number of cost planes searched.
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSubpix(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal, float sd)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// Grid may overshoot the image (launcher uses the "Over" helper).
if( x < disp.w && y < disp.h ) {
Tdisp bestd = 0;
Tvol bestc = 1E10;
// Scan all planes whose corresponding column x+sd*d lies in the image.
for(int d=0; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const Tvol c = vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
// Refine only when both neighbouring planes of the winner exist.
if( 0 < bestxr && bestxr < vol.w-1) {
// Fit parabola to neighbours
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = vol(x,y,dl);
const float sr = vol(x,y,dr);
// Vertex of the parabola through (dl,sl), (bestd,bestc), (dr,sr).
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// Minima of parabola
// Check that minima is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
disp(x,y) = out;
}
}
// Host wrapper: sub-pixel winner-takes-all disparity from a float cost volume.
void CostVolMinimumSubpix(Image<float> disp, Volume<float> vol, unsigned maxDisp, float sd)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
hipLaunchKernelGGL(( KernCostVolMinimumSubpix<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, disp,vol,maxDisp,sd);
}
//////////////////////////////////////////////////////
// Cost Volume minimum square penalty subpix refinement
//////////////////////////////////////////////////////
// Coupled data-term minimisation: per pixel, minimise
//   (lastd - d)^2 / (2*theta)  +  lambda * C(x,y,d)
// over integer d, then refine the winner with a parabola fit of the penalised
// cost. imgd holds the current (smooth) disparity estimate lastd; the result
// is written to imga. The quadratic coupling matches theta-coupled
// variational schemes — presumably used as the exhaustive-search step of such
// an optimiser (TODO confirm against callers).
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSquarePenaltySubpix(Image<Tdisp> imga, Volume<Tvol> vol, Image<float> imgd, unsigned maxDispVal, float sd, float lambda, float theta)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < imga.w && y < imga.h ) {
const float lastd = imgd(x,y);
const float inv2theta = 1.0f / (2.0f*theta);
Tdisp bestd = 0;
Tvol bestc = inv2theta*lastd*lastd + lambda * vol(x,y,0);
for(int d=1; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const float ddif = lastd - d;
const Tvol c = inv2theta*ddif*ddif + lambda * vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
// Refine only when both neighbouring planes of the winner exist.
if( 0 < bestxr && bestxr < vol.w-1) {
// Newton Step
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = inv2theta*(lastd-dl)*(lastd-dl) + lambda * vol(x,y,dl); //vol(x,y,d3);
const float sr = inv2theta*(lastd-dr)*(lastd-dr) + lambda * vol(x,y,dr); //vol(x,y,d1);
// Vertex of the parabola through the three penalised costs.
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// Check that minima is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
imga(x,y) = out;
}
}
// Host wrapper for the square-penalty coupled minimisation kernel above.
void CostVolMinimumSquarePenaltySubpix(Image<float> imga, Volume<float> vol, Image<float> imgd, unsigned maxDisp, float sd, float lambda, float theta)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,imga);
hipLaunchKernelGGL(( KernCostVolMinimumSquarePenaltySubpix<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, imga,vol,imgd,maxDisp,sd,lambda,theta);
}
//////////////////////////////////////////////////////
// Edge Weight
//////////////////////////////////////////////////////
// Per-pixel edge weight  w = exp(-alpha * |grad I|^beta)  from central
// differences; gradient components at the image border are left at zero.
__global__ void KernExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < imgi.w && y < imgi.h ) {
float2 grad = make_float2(0,0);
if(0<x && x<imgi.w-1) grad.x = imgi.GetCentralDiffDx<float>(x,y);
if(0<y && y<imgi.h-1) grad.y = imgi.GetCentralDiffDy<float>(x,y);
// if(0<x && x<imgi.w) grad.x = imgi.GetBackwardDiffDx<float>(x,y);
// if(0<y && y<imgi.h) grad.y = imgi.GetBackwardDiffDy<float>(x,y);
const float w = expf( -alpha * powf(sqrt(grad.x*grad.x + grad.y*grad.y),beta) );
imgw(x,y) = w;
}
}
// Host wrapper: compute exponential edge weights for image imgi into imgw.
void ExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,imgw);
hipLaunchKernelGGL(( KernExponentialEdgeWeight), dim3(gridDim),dim3(blockDim), 0, 0, imgw,imgi,alpha,beta);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo
//////////////////////////////////////////////////////
// Scanline (rectified) dense stereo with a second-best ratio test:
// for each left pixel, scan disparities in [minDisp,maxDisp] along the row,
// track best and second-best patch scores, and reject the match when a
// non-adjacent runner-up scores too close to the winner. Pixels inside the
// patch-unsafe border are written as InvalidValue.
template<typename TD, typename TI, typename Score>
__global__ void KernDenseStereo(
Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
TD bestDisp = InvalidValue<TD>::Value();
if( Score::width <= x && x < (dCamLeft.w - Score::width) &&
Score::height <= y && y < (dCamLeft.h - Score::height) )
{
// Search for best matching pixel
float bestScore = 1E+36;
TD sndBestDisp = InvalidValue<TD>::Value();
float sndBestScore = 1E+37;
// Negative maxDispVal permits searching negative disparities.
TD minDisp = min(maxDispVal, (TD)0);
TD maxDisp = max((TD)0, maxDispVal);
// Clamp the range so the right-image patch at x-c stays in bounds.
minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
maxDisp = min((int)maxDisp, (int)(x + Score::width));
for(TD c = minDisp; c <= maxDisp; c += dispStep ) {
const float score = Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
if(score < bestScore) {
sndBestDisp = bestDisp;
sndBestScore = bestScore;
bestDisp = c;
bestScore = score;
}else if( score <= sndBestScore) {
sndBestDisp = c;
sndBestScore = score;
}
}
// Ratio test, skipped when the runner-up is an immediate neighbour of
// the winner (adjacent disparities legitimately score similarly).
if(abs(bestDisp-sndBestDisp) > 1) {
const float cd = (sndBestScore - bestScore) / bestScore;
if( cd < acceptThresh ) {
bestDisp = InvalidValue<TD>::Value();
}
}
}
dDisp(x,y) = bestDisp;
}
const int MAXBW = 512;
//template<typename TD, typename TI, typename Score>
//__global__ void KernDenseStereo(
// Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
//) {
// const int x = blockIdx.x*blockDim.x + threadIdx.x;
// const int y = blockIdx.y*blockDim.y + threadIdx.y;
// const int W = Score::width;
// const int RAD = W / 2;
//// TI patch[W*W];
// // only enough shared mem to cache right image
//// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_l;
// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_r;
// ///////////////////////////////////
//// // Cache line of right/left image +/- RAD
//// apron_l.CacheImage(dCamLeft);
// apron_r.CacheImage(dCamRight);
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of colums for norm
////// int colsuml = 0;
//// int colsumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// colsuml += apron_l.GetRelThread(0,i);
//// colsumr += apron_r.GetRelThread(0,i);
//// }
////// col_avg_l.GetRelThread(0,0) = colsuml / W;
//// col_avg_r.GetRelThread(0,0) = colsumr / W;
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of block for norm
////// int suml = 0;
//// int sumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// suml += col_avg_l.GetRelThreadClampX(i,0);
//// sumr += col_avg_r.GetRelThreadClampX(i,0);
//// }
////// avg_l.GetRelThread(0,0) = suml / W;
//// avg_r.GetRelThread(0,0) = sumr / W;
// ///////////////////////////////////
// // Cache left patch, compute mean
////// int sum_l = 0;
//// for(int r=-RAD; r<= RAD; ++r) {
////#pragma unroll
//// for(int c=-RAD; c<=RAD; ++c) {
//// const TI val = dCamLeft.GetWithClampedRange(x+c, y+r);
//// patch[(RAD+r)*W+(RAD+c)] = val;
////// sum_l += val;
//// }
//// }
//// const TI avg_l = sum_l / (W*W);
// __syncthreads();
// TD bestDisp = InvalidValue<TD>::Value();
// if( maxDispVal+Score::width <= x && x < (dCamLeft.w - Score::width) &&
// Score::height <= y && y < (dCamLeft.h - Score::height) )
// {
// // Search for best matching pixel
// float bestScore = 1E+36;
//// TD sndBestDisp = InvalidValue<TD>::Value();
//// float sndBestScore = 1E+37;
//// TD minDisp = min(maxDispVal, (TD)0);
//// TD maxDisp = max((TD)0, maxDispVal);
//// minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
//// maxDisp = min((int)maxDisp, (int)(x + Score::width));
// for(TD c = 0; c <= maxDispVal; c += 1 ) {
// float score = 0;
// for(int ky=-RAD; ky <= RAD; ++ky ) {
//#pragma unroll
// for(int kx=-RAD; kx <= RAD; ++kx ) {
//// const int pl = apron_l.GetRelThread(kx,ky);
// const int pl = 0;//patch[(RAD+ky)*W+(RAD+kx)];
// const int pr = apron_r.GetRelThread(kx-c,ky);
// score += abs(pl - pr);
// }
// }
////// Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
// if(score < bestScore) {
//// sndBestDisp = bestDisp;
//// sndBestScore = bestScore;
// bestDisp = c;
// bestScore = score;
//// }else if( score <= sndBestScore) {
//// sndBestDisp = c;
//// sndBestScore = score;
// }
// }
//// if(abs(bestDisp-sndBestDisp) > 1) {
//// const float cd = (sndBestScore - bestScore) / bestScore;
//// if( cd < acceptThresh ) {
//// bestDisp = InvalidValue<TD>::Value();
//// }
//// }
// }
// dDisp(x,y) = bestDisp;
//}
// Host dispatch for dense stereo: selects the compile-time patch-score radius
// (0 = single pixel, 1..7 = SAND patch) and launches one thread per pixel,
// one block per image row (blockDim = (w,1)).
// NOTE(review): this requires dDisp.w to not exceed the device's max threads
// per block — verify for wide images.
template<typename TDisp, typename TImg>
void DenseStereo(
Image<TDisp> dDisp, const Image<TImg> dCamLeft, const Image<TImg> dCamRight,
TDisp maxDisp, float acceptThresh, int score_rad
) {
dim3 blockDim(dDisp.w, 1);
dim3 gridDim(1, dDisp.h);
// InitDimFromOutputImageOver(blockDim,gridDim,dDisp);
const TDisp dispStep = 1;
if( score_rad == 0 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SinglePixelSqPatchScore<float,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 1 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,1,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if( score_rad == 2 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,2,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 3 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,3,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if( score_rad == 4 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,4,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 5 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,5,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 6 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,6,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 7 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,7,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}
}
template void DenseStereo<unsigned char, unsigned char>(Image<unsigned char>, const Image<unsigned char>, const Image<unsigned char>, unsigned char, float, int);
template void DenseStereo<char, unsigned char>(Image<char>, const Image<unsigned char>, const Image<unsigned char>, char, float, int);
// Sub-pixel dense stereo entry point.
// NOTE(review): the entire kernel dispatch below is commented out, so this
// function currently computes launch dimensions and returns without doing any
// work. All parameters are effectively unused. Either restore the dispatch or
// remove/deprecate the function.
void DenseStereoSubpix(
Image<float> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, float maxDisp, float dispStep, float acceptThresh, int score_rad, bool score_normed
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dDisp);
// if(score_normed) {
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 6 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,6,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 7 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,7,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }else{
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }
}
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
const int RAD = 3;
const int W = 2*RAD+1;
// Experimental scanline stereo: one block per row; threads cache a W-row band
// of both images in shared memory, then score each disparity d by summing
// absolute differences down the cached columns x and x-d (a vertical Wx1 SAD).
// Assumes blockDim.x == image width <= MAXBW.
__global__ void KernDenseStereoTest(
Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
const int x = threadIdx.x;
const int y = blockIdx.y;
// cache_r has one column of padding — presumably to avoid shared-memory
// bank conflicts on the shifted (x-d) accesses (TODO confirm).
__shared__ unsigned char cache_l[W][MAXBW];
__shared__ unsigned char cache_r[W][MAXBW+1];
#pragma unroll
for(int r=0; r<W; ++r ) {
cache_l[r][x] = dCamLeft.Get(x,y+r-RAD);
cache_r[r][x] = dCamRight.Get(x,y+r-RAD);
}
__syncthreads();
int bestScore = 0xFFFFF;
int bestDisp = 0;
// Only consider disparities whose right column x-d stays inside the border.
const int maxClipDisp = min(x-RAD,maxDisp);
for(int d=0; d<maxClipDisp; ++d)
{
const int xd = x-d;
int score = 0;
#pragma unroll
for(int r=0; r<W; ++r) {
score += abs(cache_l[r][x] - cache_r[r][xd]);
// const int yr = y-RAD+r;
// score += abs(dCamLeft(x,yr) - dCamRight(xd,yr));
}
if(score < bestScore) {
bestScore = score;
bestDisp = d;
}
}
dDisp(x,y) = bestDisp;
}
// Host wrapper for the experimental kernel: launches on a sub-image shrunk by
// RAD rows at top and bottom so the kernel's vertical window stays in bounds.
void DenseStereoTest(
Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
const int w = dDisp.w;
const int h = dDisp.h - 2*RAD;
const int x = 0;
const int y = RAD;
dim3 blockDim(w, 1);
dim3 gridDim(1, h);
hipLaunchKernelGGL(( KernDenseStereoTest), dim3(gridDim),dim3(blockDim), 0, 0, dDisp.SubImage(x,y,w,h), dCamLeft.SubImage(x,y,w,h), dCamRight.SubImage(x,y,w,h), maxDisp);
}
//////////////////////////////////////////////////////
// Check Left and Right disparity images match
//////////////////////////////////////////////////////
// Left-right consistency check: for each left disparity dl, look up the right
// view's disparity at the corresponding column x + sd*dl and invalidate dl
// when the two disagree by more than maxDiff, the right value is invalid, or
// the correspondence falls off the image.
template<typename TD>
__global__ void KernLeftRightCheck(
Image<TD> dispL, Image<TD> dispR, float sd, float maxDiff
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( dispL.InBounds(x,y) ) {
const TD dl = dispL(x,y);
const TD xr = x + sd*dl;
if( 0 <= xr && xr < dispR.w) {
const TD dr = dispR(xr, y);
if(!InvalidValue<TD>::IsValid(dr) || abs(dl - dr) > maxDiff) {
dispL(x,y) = InvalidValue<TD>::Value();
}
}else{
dispL(x,y) = InvalidValue<TD>::Value();
}
}
}
// Host wrapper: left-right consistency check for integer (char) disparities.
void LeftRightCheck(Image<char> dispL, Image<char> dispR, int sd, int maxDiff)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim, dispL);
hipLaunchKernelGGL(( KernLeftRightCheck<char>), dim3(gridDim),dim3(blockDim), 0, 0, dispL, dispR, sd, maxDiff);
}
// Host wrapper: left-right consistency check for float disparities.
void LeftRightCheck(Image<float> dispL, Image<float> dispR, float sd, float maxDiff)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim, dispL);
hipLaunchKernelGGL(( KernLeftRightCheck<float>), dim3(gridDim),dim3(blockDim), 0, 0, dispL, dispR, sd, maxDiff);
}
//////////////////////////////////////////////////////
// Visualise cross section of disparity image
//////////////////////////////////////////////////////
// Visualise one scanline (row y) of the matching-cost function: for each left
// pixel x and candidate disparity c, write the normalised patch score as a
// grey value into dScore(x,c).
// dDisp is currently unused; the original read the winning disparity into an
// unused local (`mindisp`), removed here. The parameter is kept so existing
// callers still compile.
template<typename TD, typename TI, typename Score>
__global__ void KernDisparityImageCrossSection(
    Image<TD> dScore, Image<unsigned char> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, int y
) {
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int c = blockIdx.y*blockDim.y + threadIdx.y;
    const int rx = x-c;
    // Score only when the right-image column is in range; otherwise 0.
    const float score = ( 0<= rx && rx < dCamRight.w ) ? Score::Score(dCamLeft, x,y, dCamRight, rx, y) : 0;
    // RMS per-pixel score mapped to [0,1] grey — assumes 8-bit imagery.
    const float show = sqrt(score / Score::area) / 255.0f;
    dScore(x,c) = show * make_float4( 1,1,1,1);
}
// Host wrapper: render the row-y cost cross-section (x vs. disparity) for
// debugging/visualisation.
void DisparityImageCrossSection(
Image<float4> dScore, Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, int y
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dScore);
hipLaunchKernelGGL(( KernDisparityImageCrossSection<float4, unsigned char, DefaultSafeScoreType>), dim3(gridDim),dim3(blockDim), 0, 0, dScore, dDisp, dCamLeft, dCamRight, y);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo sub-pixel refinement
//////////////////////////////////////////////////////
// Refine integer disparities to sub-pixel accuracy by fitting a parabola to
// the patch scores at d-1, d, d+1 and taking its vertex. Writes InvalidValue
// when the input disparity is below MinDisparity or the vertex is implausible.
// NOTE(review): no x/y bounds guard — relies on the exact-fit launch from
// InitDimFromOutputImage in the wrapper below.
template<typename TDo, typename TDi, typename TI, typename Score>
__global__ void KernDenseStereoSubpixelRefine(
Image<TDo> dDispOut, const Image<TDi> dDisp, const Image<TI> dCamLeft, const Image<TI> dCamRight
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int bestDisp = dDisp(x,y);
// Ignore things at infinity
if(bestDisp < MinDisparity) {
dDispOut(x,y) = InvalidValue<TDo>::Value();
return;
}
// Fit parabola to neighbours
const float d1 = bestDisp+1;
const float d2 = bestDisp;
const float d3 = bestDisp-1;
const float s1 = Score::Score(dCamLeft, x,y, dCamRight, x-d1,y);
const float s2 = Score::Score(dCamLeft, x,y, dCamRight, x-d2,y);
const float s3 = Score::Score(dCamLeft, x,y, dCamRight, x-d3,y);
// Cooefficients of parabola through (d1,s1),(d2,s2),(d3,s3)
const float denom = (d1 - d2)*(d1 - d3)*(d2 - d3);
const float A = (d3 * (s2 - s1) + d2 * (s1 - s3) + d1 * (s3 - s2)) / denom;
const float B = (d3*d3 * (s1 - s2) + d2*d2 * (s3 - s1) + d1*d1 * (s2 - s3)) / denom;
// const float C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom;
// Minima of parabola
const float newDisp = -B / (2*A);
// Check that minima is sensible. Otherwise assume bad data.
if( d3 < newDisp && newDisp < d1 ) {
dDispOut(x,y) = newDisp;
}else{
// dDisp(x,y) = bestDisp / maxDisp;
dDispOut(x,y) = InvalidValue<TDo>::Value();
}
}
// Host wrapper for sub-pixel refinement.
// NOTE(review): launch dims come from the input dDisp, not dDispOut — assumes
// both images have identical dimensions (TODO confirm at call sites).
void DenseStereoSubpixelRefine(
Image<float> dDispOut, const Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dDisp);
hipLaunchKernelGGL(( KernDenseStereoSubpixelRefine<float,unsigned char,unsigned char, DefaultSafeScoreType>), dim3(gridDim),dim3(blockDim), 0, 0, dDispOut, dDisp, dCamLeft, dCamRight);
}
//////////////////////////////////////////////////////
// Upgrade disparity image to vertex array
//////////////////////////////////////////////////////
// Convert each disparity to a 3D vertex (homogeneous float4) using the
// calibrated pinhole parameters and baseline, via DepthFromDisparity.
// No bounds guard — relies on the exact-fit launch in the wrapper below.
__global__ void KernDisparityImageToVbo(
Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
dVbo(u,v) = DepthFromDisparity(u,v, dDisp(u,v), baseline, fu, fv, u0, v0, MinDisparity);
}
// Host wrapper: populate a vertex buffer from a disparity image.
void DisparityImageToVbo(Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0)
{
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dVbo);
hipLaunchKernelGGL(( KernDisparityImageToVbo), dim3(gridDim),dim3(blockDim), 0, 0, dVbo, dDisp, baseline, fu, fv, u0, v0);
}
//////////////////////////////////////////////////////
// Cost Volume
//////////////////////////////////////////////////////
// Reset every element of the cost volume to an empty accumulator:
// zero accumulated cost and zero observation count.
void CostVolumeZero(Volume<CostVolElem> costvol )
{
    CostVolElem zero;
    zero.sum = 0;
    zero.n = 0;
#ifndef _MSC_VER
    costvol.Fill(zero);
#else
    // Cannot use thrust::fill on windows with aligned structure.
    throw std::runtime_error("Not implemented on MSVC.");
#endif
}
//////////////////////////////////////////////////////
// Initialise the cost volume from a rectified stereo pair: each (u,v,d) cell
// gets the normalised patch score against column u-d of the right image, with
// observation count 1. Cells where u-d would clip the patch are left untouched.
// No u/v/d upper-bound guard — relies on the wrapper's exact-fit grid.
template<typename TD, typename TI, typename Score>
__global__ void KernCostVolumeFromStereo(
Volume<CostVolElem> dvol, Image<TI> dimgl, Image<TI> dimgr
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
if( u-d >= (int)Score::rad) {
CostVolElem elem;
elem.sum = Score::Score(dimgl, u,v, dimgr, u-d, v) / Score::area;
elem.n = 1;
dvol(u,v,d) = elem;
}
}
// Host wrapper: build the initial stereo cost volume.
// NOTE(review): the grid uses truncating integer division, so volume
// dimensions must be multiples of 8 or the remainder is never processed
// (compare the ceil-div grid used further below).
void CostVolumeFromStereo(Volume<CostVolElem> dvol, Image<unsigned char> dimgl, Image<unsigned char> dimgr )
{
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
hipLaunchKernelGGL(( KernCostVolumeFromStereo<unsigned char, unsigned char, DefaultSafeScoreType>), dim3(gridDim),dim3(blockDim), 0, 0, dvol,dimgl,dimgr);
}
//////////////////////////////////////////////////////
// Accumulate a new camera view into the cost volume: back-project (u,v,d) to
// a 3D point in the keyframe (depth = fu*baseline/d), project it into the
// comparison view via KT_cv, and, if it lands safely inside that image, add
// the normalised patch score and bump the observation count.
// NOTE(review): d == 0 yields an infinite depth (division by d); the
// projection then fails the in-bounds test, so plane 0 is never updated —
// TODO confirm this is intended.
template<typename TI, typename Score>
__global__ void KernAddToCostVolume(
Volume<CostVolElem> dvol, const Image<TI> dimgv,
const Image<TI> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline
){
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
float3 Pv;
Pv.z = fu * baseline / d;
Pv.x = Pv.z * (u-u0) / fu;
Pv.y = Pv.z * (v-v0) / fv;
const float3 KPc = KT_cv * Pv;
const float2 pc = dn(KPc);
// Require the point in front of the camera and 5 px inside the image.
if( KPc.z > 0 && dimgc.InBounds(pc.x, pc.y,5) ) {
// vol(u,v,d) = 1.0f;
const float score = Score::Score(dimgv, u,v, dimgc, pc.x, pc.y) / (float)(Score::area);
// const float score = (dimgv(u,v) - dimgc.template GetBilinear<float>(pc)) / 255.0f;
CostVolElem elem = dvol(u,v,d);
elem.sum += score;
elem.n += 1;
dvol(u,v,d) = elem;
}
}
// Host wrapper: fuse one additional view into the cost volume.
// NOTE(review): `levels` is accepted but never used; the grid uses truncating
// division, so volume dimensions must be multiples of 8.
void CostVolumeAdd(Volume<CostVolElem> dvol, const Image<unsigned char> dimgv,
const Image<unsigned char> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline, int levels
) {
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
hipLaunchKernelGGL(( KernAddToCostVolume<unsigned char, SANDPatchScore<float,DefaultRad,ImgAccessBilinearClamped<float> > >), dim3(gridDim),dim3(blockDim), 0, 0, dvol,dimgv,dimgc, KT_cv, fu,fv,u0,v0, baseline);
}
//////////////////////////////////////////////////////
template<typename Tdisp>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<CostVolElem> vol)
{
    // Winner-takes-all over the accumulated cost volume, ranking planes by
    // mean cost (sum / n).
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;

    // The host launcher uses InitDimFromOutputImageOver, so the grid may
    // overshoot the image; the original had no guard, giving out-of-bounds
    // accesses on non-divisible image sizes.
    if( x < disp.w && y < disp.h ) {
        Tdisp bestd = 0;
        float bestc = 1E30;
        const unsigned maxDisp = vol.d;
        // (The original's `#pragma unroll` was dropped: the trip count is a
        // runtime value, so it could not take effect.)
        for(unsigned d=0; d < maxDisp; ++d) {
            const CostVolElem elem = vol(x,y,d);
            // Mean cost over the n accumulated observations.
            // NOTE(review): elem.n == 0 (never observed) divides by zero —
            // TODO confirm planes are always initialised before minimisation.
            const float c = (elem.sum / elem.n);
            if(c < bestc) {
                bestc = c;
                bestd = d;
            }
        }
        disp(x,y) = bestd;
    }
}
// Host wrapper: winner-takes-all disparity from the accumulated cost volume.
void CostVolMinimum(Image<float> disp, Volume<CostVolElem> vol)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
hipLaunchKernelGGL(( KernCostVolMinimum<float>), dim3(gridDim),dim3(blockDim), 0, 0, disp,vol);
}
//////////////////////////////////////////////////////
// Render one x/disparity slice of the accumulated cost volume as a grey image
// (mean cost scaled by 1/255); out-of-slice pixels get InvalidValue.
__global__ void KernCostVolumeCrossSection(
Image<float> dScore, Image<CostVolElem> dCostVolSlice
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int d = blockIdx.y*blockDim.y + threadIdx.y;
if( dCostVolSlice.InBounds(x,d) )
{
CostVolElem elem = dCostVolSlice(x,d);
const float score = (elem.sum / elem.n) / 255.0f;
dScore(x,d) = score;
}else{
dScore(x,d) = InvalidValue<float>::Value();
}
}
// Host wrapper: visualise row y of the cost volume (x vs. disparity).
void CostVolumeCrossSection(
Image<float> dScore, Volume<CostVolElem> dCostVol, int y
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dScore);
hipLaunchKernelGGL(( KernCostVolumeCrossSection), dim3(gridDim),dim3(blockDim), 0, 0, dScore, dCostVol.ImageXZ(y));
}
//////////////////////////////////////////////////////
// Copy disparities from dIn to dOut, invalidating (writing -1 to) pixels whose
// squared gradient magnitude exceeds `threshold`.
template<typename To, typename Ti>
__global__ void KernFilterDispGrad(Image<To> dOut, Image<Ti> dIn, float threshold )
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    if( dOut.InBounds(x,y) ) {
        // Measure the gradient on the *input* image. The original computed
        // it from dOut, which (a) races against the concurrent writes to
        // dOut below, and (b) measures whatever the output buffer happened
        // to contain before the kernel ran.
        const float dx = dIn.template GetCentralDiffDx<float>(x,y);
        const float dy = dIn.template GetCentralDiffDy<float>(x,y);
        const bool valid = dx*dx + dy*dy < threshold;
        dOut(x,y) = valid ? dIn(x,y) : -1;
    }
}
// Host wrapper: gradient-based disparity filtering with 16x16 blocks.
void FilterDispGrad(
Image<float> dOut, Image<float> dIn, float threshold
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dOut, 16, 16);
hipLaunchKernelGGL(( KernFilterDispGrad<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, dOut, dIn, threshold);
}
//////////////////////////////////////////////////////
// Cost volume with truncated grad and abs. diff. score
// Fast Cost-Volume Filtering for Visual Correspondence and Beyond
// Christoph Rhemann, Asmaa Hosni, Michael Bleyer, Carsten Rother, Margrit Gelautz
//////////////////////////////////////////////////////
// Per (pixel, disparity) matching cost: blend of truncated absolute intensity
// difference (weight 1-alpha, cap r1) and truncated absolute horizontal
// gradient difference (weight alpha, cap r2), after Rhemann et al.,
// "Fast Cost-Volume Filtering for Visual Correspondence and Beyond".
// Out-of-range correspondences receive the maximal (fully truncated) cost.
template<typename Tout, typename Tin>
__global__ void KernCostVolumeFromStereoTruncatedAbsAndGrad(
    Volume<Tout> dvol, Image<Tin> dimgl, Image<Tin> dimgr, float sd,
    float alpha, float r1, float r2
) {
    const int u = blockIdx.x*blockDim.x + threadIdx.x;
    const int v = blockIdx.y*blockDim.y + threadIdx.y;
    const int d = blockIdx.z*blockDim.z + threadIdx.z;

    // The host launcher rounds the grid up (ceil-div), so guard against
    // out-of-bounds volume writes (the original had no guard).
    if( u < dvol.w && v < dvol.h && d < dvol.d ) {
        // NOTE(review): the original overwrote `alpha = 0; r1 = 1e37;` here
        // — debug leftovers that silently discarded the caller's parameters.
        // Removed so alpha/r1 take effect as documented.
        const int r = u + sd*d;
        if( 0 <= r && r < dimgr.w ) {
            const float absI = fabs( (float)dimgr(r,v) - (float)dimgl(u,v));
            const float absGrad = fabs( dimgr.template GetCentralDiffDx<float>(r,v) - dimgl.template GetCentralDiffDx<float>(u,v) );
            const Tout cost = (1.0f-alpha)*min(absI,r1) + alpha*min(absGrad,r2);
            dvol(u,v,d) = cost;
        }else{
            dvol(u,v,d) = (1.0f-alpha)*r1 + alpha*r2;
        }
    }
}
// Host wrapper: build the truncated abs+gradient cost volume. The grid is
// rounded up (ceil), so it may over-cover the volume.
void CostVolumeFromStereoTruncatedAbsAndGrad(Volume<float> dvol, Image<float> dimgl, Image<float> dimgr, float sd, float alpha, float r1, float r2 )
{
dim3 blockDim(8,8,8);
dim3 gridDim( ceil(dvol.w / (float)blockDim.x), ceil(dvol.h / (float)blockDim.y), ceil(dvol.d / (float)blockDim.z) );
hipLaunchKernelGGL(( KernCostVolumeFromStereoTruncatedAbsAndGrad<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, dvol,dimgl,dimgr,sd, alpha,r1,r2);
}
}
| 4e24372fb5354cc7720bfe0986a4233aee4c779a.cu | #include "cu_dense_stereo.h"
#include <stdexcept>
#include "launch_utils.h"
#include "MatUtils.h"
#include "patch_score.h"
#include "disparity.h"
#include "InvalidValue.h"
#include "ImageApron.h"
namespace roo
{
const int MinDisparity = 0;
const int DefaultRad = 2;
//typedef SSNDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
typedef SANDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
//typedef SinglePixelSqPatchScore<float,ImgAccessRaw> DefaultSafeScoreType;
//////////////////////////////////////////////////////
// Cost Volume minimum
//////////////////////////////////////////////////////
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal)
{
    // Winner-takes-all: for each pixel (x,y) pick the disparity plane with the
    // lowest cost in the volume.
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;

    // The host launcher uses InitDimFromOutputImageOver, i.e. the grid may
    // overshoot the image, so an explicit bounds guard is required (the
    // sibling kernels KernCostVolMinimumSubpix etc. already have one; the
    // original of this kernel did not, giving out-of-bounds reads/writes).
    if( x < disp.w && y < disp.h ) {
        Tdisp bestd = 0;
        Tvol bestc = vol(x,y,0);
        // Clamp the search to x+1 planes — presumably because disparity d
        // references column x-d of the other view (TODO confirm).
        const int maxDisp = min(maxDispVal, x+1);
        for(int d=1; d < maxDisp; ++d) {
            const Tvol c = vol(x,y,d);
            if(c < bestc) {
                bestc = c;
                bestd = d;
            }
        }
        disp(x,y) = bestd;
    }
}
// Host wrapper: winner-takes-all disparity extraction from a cost volume.
// Uses the "Over" launch helper, so the grid may exceed the image bounds and
// the kernel is expected to bounds-check.
template<typename Tdisp, typename Tvol>
void CostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDisp)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
KernCostVolMinimum<Tdisp,Tvol><<<gridDim,blockDim>>>(disp,vol,maxDisp);
}
template void CostVolMinimum<>(Image<char>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned short>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned char>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<unsigned short>,unsigned);
//////////////////////////////////////////////////////
// Cost Volume minimum subpix refinement
//////////////////////////////////////////////////////
// Winner-takes-all disparity with parabolic sub-pixel refinement.
// sd is the sign/step applied to the disparity when mapping to the other
// view's column (x + sd*d); maxDispVal is the number of cost planes searched.
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSubpix(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal, float sd)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// Grid may overshoot the image (launcher uses the "Over" helper).
if( x < disp.w && y < disp.h ) {
Tdisp bestd = 0;
Tvol bestc = 1E10;
// Scan all planes whose corresponding column x+sd*d lies in the image.
for(int d=0; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const Tvol c = vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
// Refine only when both neighbouring planes of the winner exist.
if( 0 < bestxr && bestxr < vol.w-1) {
// Fit parabola to neighbours
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = vol(x,y,dl);
const float sr = vol(x,y,dr);
// Vertex of the parabola through (dl,sl), (bestd,bestc), (dr,sr).
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// Minima of parabola
// Check that minima is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
disp(x,y) = out;
}
}
// Host wrapper: sub-pixel winner-takes-all disparity from a float cost volume.
void CostVolMinimumSubpix(Image<float> disp, Volume<float> vol, unsigned maxDisp, float sd)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
KernCostVolMinimumSubpix<float,float><<<gridDim,blockDim>>>(disp,vol,maxDisp,sd);
}
//////////////////////////////////////////////////////
// Cost Volume minimum square penalty subpix refinement
//////////////////////////////////////////////////////
// Coupled data-term minimisation: per pixel, minimise
//   (lastd - d)^2 / (2*theta)  +  lambda * C(x,y,d)
// over integer d, then refine the winner with a parabola fit of the penalised
// cost. imgd holds the current (smooth) disparity estimate lastd; the result
// is written to imga. The quadratic coupling matches theta-coupled
// variational schemes — presumably used as the exhaustive-search step of such
// an optimiser (TODO confirm against callers).
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSquarePenaltySubpix(Image<Tdisp> imga, Volume<Tvol> vol, Image<float> imgd, unsigned maxDispVal, float sd, float lambda, float theta)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < imga.w && y < imga.h ) {
const float lastd = imgd(x,y);
const float inv2theta = 1.0f / (2.0f*theta);
Tdisp bestd = 0;
Tvol bestc = inv2theta*lastd*lastd + lambda * vol(x,y,0);
for(int d=1; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const float ddif = lastd - d;
const Tvol c = inv2theta*ddif*ddif + lambda * vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
// Refine only when both neighbouring planes of the winner exist.
if( 0 < bestxr && bestxr < vol.w-1) {
// Newton Step
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = inv2theta*(lastd-dl)*(lastd-dl) + lambda * vol(x,y,dl); //vol(x,y,d3);
const float sr = inv2theta*(lastd-dr)*(lastd-dr) + lambda * vol(x,y,dr); //vol(x,y,d1);
// Vertex of the parabola through the three penalised costs.
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// Check that minima is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
imga(x,y) = out;
}
}
// Host wrapper: over-sized 2D launch of the square-penalty sub-pixel
// cost-volume minimum kernel.
void CostVolMinimumSquarePenaltySubpix(Image<float> imga, Volume<float> vol, Image<float> imgd, unsigned maxDisp, float sd, float lambda, float theta)
{
    dim3 block;
    dim3 grid;
    InitDimFromOutputImageOver(block, grid, imga);
    KernCostVolMinimumSquarePenaltySubpix<float, float><<<grid, block>>>(imga, vol, imgd, maxDisp, sd, lambda, theta);
}
//////////////////////////////////////////////////////
// Edge Weight
//////////////////////////////////////////////////////
/**
 * Per-pixel edge weight w = exp(-alpha * |grad I|^beta) from the central
 * difference image gradient; pixels on the image border use a zero gradient
 * component in the clamped direction. Over-sized launches are safe (bounds
 * test below).
 */
__global__ void KernExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    if( x < imgi.w && y < imgi.h ) {
        float2 grad = make_float2(0,0);
        if(0<x && x<imgi.w-1) grad.x = imgi.GetCentralDiffDx<float>(x,y);
        if(0<y && y<imgi.h-1) grad.y = imgi.GetCentralDiffDy<float>(x,y);
        // Fix: use sqrtf, not sqrt — the double-precision overload forced a
        // float->double->float round trip in otherwise single-precision code.
        const float mag = sqrtf(grad.x*grad.x + grad.y*grad.y);
        imgw(x,y) = expf( -alpha * powf(mag, beta) );
    }
}
// Host wrapper: over-sized 2D launch of the exponential edge-weight kernel.
void ExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
    dim3 block;
    dim3 grid;
    InitDimFromOutputImageOver(block, grid, imgw);
    KernExponentialEdgeWeight<<<grid, block>>>(imgw, imgi, alpha, beta);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo
//////////////////////////////////////////////////////
/**
 * Scanline rectified dense stereo: for each left-image pixel, scans the
 * disparity range for the best (and second-best) patch score in the right
 * image, and rejects ambiguous winners.
 *
 * Border pixels (within Score::width/height of the edge) keep the invalid
 * marker. NOTE(review): the final write is unguarded; this is safe with the
 * launch in DenseStereo() (blockDim.x == dDisp.w, gridDim.y == dDisp.h) but
 * would be out of bounds with a generic rounded-up launch — confirm callers.
 */
template<typename TD, typename TI, typename Score>
__global__ void KernDenseStereo(
Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
TD bestDisp = InvalidValue<TD>::Value();
if( Score::width <= x && x < (dCamLeft.w - Score::width) &&
Score::height <= y && y < (dCamLeft.h - Score::height) )
{
// Search for best matching pixel
float bestScore = 1E+36;
TD sndBestDisp = InvalidValue<TD>::Value();
float sndBestScore = 1E+37;
// Allow negative disparity ranges (sign of maxDispVal), then clamp the
// range so the right-image patch stays inside the image.
TD minDisp = min(maxDispVal, (TD)0);
TD maxDisp = max((TD)0, maxDispVal);
minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
maxDisp = min((int)maxDisp, (int)(x + Score::width));
for(TD c = minDisp; c <= maxDisp; c += dispStep ) {
const float score = Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
if(score < bestScore) {
// New best: previous best becomes the runner-up.
sndBestDisp = bestDisp;
sndBestScore = bestScore;
bestDisp = c;
bestScore = score;
}else if( score <= sndBestScore) {
sndBestDisp = c;
sndBestScore = score;
}
}
// Uniqueness test: if the runner-up is not an immediate neighbour of the
// winner and scores nearly as well, treat the match as ambiguous.
if(abs(bestDisp-sndBestDisp) > 1) {
const float cd = (sndBestScore - bestScore) / bestScore;
if( cd < acceptThresh ) {
bestDisp = InvalidValue<TD>::Value();
}
}
}
dDisp(x,y) = bestDisp;
}
// Maximum scanline width (threads per block) supported by the shared-memory
// row caches in KernDenseStereoTest below; wider images would overflow them.
const int MAXBW = 512;
//template<typename TD, typename TI, typename Score>
//__global__ void KernDenseStereo(
// Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
//) {
// const int x = blockIdx.x*blockDim.x + threadIdx.x;
// const int y = blockIdx.y*blockDim.y + threadIdx.y;
// const int W = Score::width;
// const int RAD = W / 2;
//// TI patch[W*W];
// // only enough shared mem to cache right image
//// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_l;
// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_r;
// ///////////////////////////////////
//// // Cache line of right/left image +/- RAD
//// apron_l.CacheImage(dCamLeft);
// apron_r.CacheImage(dCamRight);
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of colums for norm
////// int colsuml = 0;
//// int colsumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// colsuml += apron_l.GetRelThread(0,i);
//// colsumr += apron_r.GetRelThread(0,i);
//// }
////// col_avg_l.GetRelThread(0,0) = colsuml / W;
//// col_avg_r.GetRelThread(0,0) = colsumr / W;
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of block for norm
////// int suml = 0;
//// int sumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// suml += col_avg_l.GetRelThreadClampX(i,0);
//// sumr += col_avg_r.GetRelThreadClampX(i,0);
//// }
////// avg_l.GetRelThread(0,0) = suml / W;
//// avg_r.GetRelThread(0,0) = sumr / W;
// ///////////////////////////////////
// // Cache left patch, compute mean
////// int sum_l = 0;
//// for(int r=-RAD; r<= RAD; ++r) {
////#pragma unroll
//// for(int c=-RAD; c<=RAD; ++c) {
//// const TI val = dCamLeft.GetWithClampedRange(x+c, y+r);
//// patch[(RAD+r)*W+(RAD+c)] = val;
////// sum_l += val;
//// }
//// }
//// const TI avg_l = sum_l / (W*W);
// __syncthreads();
// TD bestDisp = InvalidValue<TD>::Value();
// if( maxDispVal+Score::width <= x && x < (dCamLeft.w - Score::width) &&
// Score::height <= y && y < (dCamLeft.h - Score::height) )
// {
// // Search for best matching pixel
// float bestScore = 1E+36;
//// TD sndBestDisp = InvalidValue<TD>::Value();
//// float sndBestScore = 1E+37;
//// TD minDisp = min(maxDispVal, (TD)0);
//// TD maxDisp = max((TD)0, maxDispVal);
//// minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
//// maxDisp = min((int)maxDisp, (int)(x + Score::width));
// for(TD c = 0; c <= maxDispVal; c += 1 ) {
// float score = 0;
// for(int ky=-RAD; ky <= RAD; ++ky ) {
//#pragma unroll
// for(int kx=-RAD; kx <= RAD; ++kx ) {
//// const int pl = apron_l.GetRelThread(kx,ky);
// const int pl = 0;//patch[(RAD+ky)*W+(RAD+kx)];
// const int pr = apron_r.GetRelThread(kx-c,ky);
// score += abs(pl - pr);
// }
// }
////// Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
// if(score < bestScore) {
//// sndBestDisp = bestDisp;
//// sndBestScore = bestScore;
// bestDisp = c;
// bestScore = score;
//// }else if( score <= sndBestScore) {
//// sndBestDisp = c;
//// sndBestScore = score;
// }
// }
//// if(abs(bestDisp-sndBestDisp) > 1) {
//// const float cd = (sndBestScore - bestScore) / bestScore;
//// if( cd < acceptThresh ) {
//// bestDisp = InvalidValue<TD>::Value();
//// }
//// }
// }
// dDisp(x,y) = bestDisp;
//}
/**
 * Dense scanline stereo dispatch: maps the runtime patch radius onto the
 * corresponding compile-time score type and launches KernDenseStereo with
 * one thread per column and one block per image row. Radii outside [0,7]
 * launch nothing (unchanged behaviour).
 */
template<typename TDisp, typename TImg>
void DenseStereo(
    Image<TDisp> dDisp, const Image<TImg> dCamLeft, const Image<TImg> dCamRight,
    TDisp maxDisp, float acceptThresh, int score_rad
) {
    const dim3 block(dDisp.w, 1);
    const dim3 grid(1, dDisp.h);
    const TDisp dispStep = 1;
    switch(score_rad) {
    case 0: KernDenseStereo<TDisp, TImg, SinglePixelSqPatchScore<float,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    case 1: KernDenseStereo<TDisp, TImg, SANDPatchScore<float,1,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    case 2: KernDenseStereo<TDisp, TImg, SANDPatchScore<float,2,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    case 3: KernDenseStereo<TDisp, TImg, SANDPatchScore<float,3,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    case 4: KernDenseStereo<TDisp, TImg, SANDPatchScore<float,4,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    case 5: KernDenseStereo<TDisp, TImg, SANDPatchScore<float,5,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    case 6: KernDenseStereo<TDisp, TImg, SANDPatchScore<float,6,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    case 7: KernDenseStereo<TDisp, TImg, SANDPatchScore<float,7,ImgAccessRaw > ><<<grid,block>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh); break;
    default: break; // unsupported radius: no launch, as before
    }
}
// Explicit instantiations of DenseStereo for the disparity/image type
// combinations used by the library.
template void DenseStereo<unsigned char, unsigned char>(Image<unsigned char>, const Image<unsigned char>, const Image<unsigned char>, unsigned char, float, int);
template void DenseStereo<char, unsigned char>(Image<char>, const Image<unsigned char>, const Image<unsigned char>, char, float, int);
/**
 * Sub-pixel dense stereo with fractional disparity stepping.
 *
 * NOTE(review): the entire implementation below is commented out, so this
 * function currently only computes a launch configuration and returns —
 * no kernel is launched and every parameter is effectively unused.
 * Confirm this is intentionally disabled before relying on it.
 */
void DenseStereoSubpix(
Image<float> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, float maxDisp, float dispStep, float acceptThresh, int score_rad, bool score_normed
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dDisp);
// if(score_normed) {
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 6 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,6,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 7 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,7,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }else{
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }
}
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
// Fixed vertical correlation-window radius and full width (W = 7) used by
// KernDenseStereoTest's shared-memory row caches.
const int RAD = 3;
const int W = 2*RAD+1;
/**
 * Experimental block-per-row stereo: each block handles one scanline and
 * caches a W-row band of both images in shared memory, then scores each
 * disparity with a 1xW vertical-column SAD.
 *
 * Launch contract (see DenseStereoTest): blockDim.x == image width,
 * gridDim.y == number of rows; images are RAD-row-inset subimages.
 * NOTE(review): the caches hold at most MAXBW columns, so images wider than
 * MAXBW overflow shared memory — confirm callers clamp the width.
 */
__global__ void KernDenseStereoTest(
Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
const int x = threadIdx.x;
const int y = blockIdx.y;
// One column per thread, W rows per image. The +1 pads cache_r's row
// stride, presumably to reduce shared-memory bank conflicts when reading
// the shifted column xd — TODO confirm.
__shared__ unsigned char cache_l[W][MAXBW];
__shared__ unsigned char cache_r[W][MAXBW+1];
#pragma unroll
for(int r=0; r<W; ++r ) {
cache_l[r][x] = dCamLeft.Get(x,y+r-RAD);
cache_r[r][x] = dCamRight.Get(x,y+r-RAD);
}
__syncthreads();
int bestScore = 0xFFFFF;
int bestDisp = 0;
// Clamp the search so the shifted column x-d stays within the cached row
// (with a RAD margin on the left).
const int maxClipDisp = min(x-RAD,maxDisp);
for(int d=0; d<maxClipDisp; ++d)
{
const int xd = x-d;
int score = 0;
#pragma unroll
for(int r=0; r<W; ++r) {
score += abs(cache_l[r][x] - cache_r[r][xd]);
// const int yr = y-RAD+r;
// score += abs(dCamLeft(x,yr) - dCamRight(xd,yr));
}
if(score < bestScore) {
bestScore = score;
bestDisp = d;
}
}
dDisp(x,y) = bestDisp;
}
// Launches the experimental scanline stereo kernel over the image interior,
// trimming RAD rows top and bottom so the vertical window always fits.
void DenseStereoTest(
    Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
    const int width = dDisp.w;
    const int height = dDisp.h - 2 * RAD;
    // One thread per column, one block per remaining row.
    const dim3 block(width, 1);
    const dim3 grid(1, height);
    KernDenseStereoTest<<<grid, block>>>(
        dDisp.SubImage(0, RAD, width, height),
        dCamLeft.SubImage(0, RAD, width, height),
        dCamRight.SubImage(0, RAD, width, height),
        maxDisp);
}
//////////////////////////////////////////////////////
// Check Left and Right disparity images match
//////////////////////////////////////////////////////
/**
 * Left/right consistency check: invalidates pixels of dispL whose disparity
 * is not confirmed by the corresponding pixel in dispR.
 *
 * @param sd      sign/scale mapping a left disparity to a column in dispR.
 * @param maxDiff maximum tolerated |dl - dr| before the pixel is rejected.
 */
template<typename TD>
__global__ void KernLeftRightCheck(
Image<TD> dispL, Image<TD> dispR, float sd, float maxDiff
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( dispL.InBounds(x,y) ) {
const TD dl = dispL(x,y);
// Column this pixel maps to in the other view. NOTE(review): for integer
// TD (e.g. the char overload) the float product sd*dl is truncated here —
// confirm this is the intended rounding.
const TD xr = x + sd*dl;
if( 0 <= xr && xr < dispR.w) {
const TD dr = dispR(xr, y);
// Reject if the other view's disparity is invalid or disagrees too much.
if(!InvalidValue<TD>::IsValid(dr) || abs(dl - dr) > maxDiff) {
dispL(x,y) = InvalidValue<TD>::Value();
}
}else{
// Maps outside the other image: cannot be verified, so invalidate.
dispL(x,y) = InvalidValue<TD>::Value();
}
}
}
// Integer-disparity overload of the left/right consistency check.
void LeftRightCheck(Image<char> dispL, Image<char> dispR, int sd, int maxDiff)
{
    dim3 block;
    dim3 grid;
    InitDimFromOutputImageOver(block, grid, dispL);
    KernLeftRightCheck<char><<<grid, block>>>(dispL, dispR, sd, maxDiff);
}
// Floating-point-disparity overload of the left/right consistency check.
void LeftRightCheck(Image<float> dispL, Image<float> dispR, float sd, float maxDiff)
{
    dim3 block;
    dim3 grid;
    InitDimFromOutputImageOver(block, grid, dispL);
    KernLeftRightCheck<float><<<grid, block>>>(dispL, dispR, sd, maxDiff);
}
//////////////////////////////////////////////////////
// Visualise cross section of disparity image
//////////////////////////////////////////////////////
/**
 * Renders one image row (y) as an x-by-disparity cross-section of matching
 * costs: dScore(x,c) is the normalised score of matching left pixel (x,y)
 * against right pixel (x-c,y), written as a grey RGBA value. Out-of-range
 * right pixels score 0 (black).
 *
 * dDisp remains in the signature for interface compatibility; the previous
 * revision read it into a local that was never used.
 */
template<typename TD, typename TI, typename Score>
__global__ void KernDisparityImageCrossSection(
    Image<TD> dScore, Image<unsigned char> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, int y
) {
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int c = blockIdx.y*blockDim.y + threadIdx.y;
    const int rx = x-c;
    const float score = ( 0<= rx && rx < dCamRight.w ) ? Score::Score(dCamLeft, x,y, dCamRight, rx, y) : 0;
    // Fix: sqrtf, not sqrt — avoid a double-precision round trip on device.
    const float show = sqrtf(score / Score::area) / 255.0f;
    dScore(x,c) = show * make_float4( 1,1,1,1);
}
// Host wrapper: exact-fit 2D launch of the cross-section visualisation kernel
// for row y.
void DisparityImageCrossSection(
    Image<float4> dScore, Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, int y
) {
    dim3 block;
    dim3 grid;
    InitDimFromOutputImage(block, grid, dScore);
    KernDisparityImageCrossSection<float4, unsigned char, DefaultSafeScoreType><<<grid, block>>>(dScore, dDisp, dCamLeft, dCamRight, y);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo sub-pixel refinement
//////////////////////////////////////////////////////
/**
 * Sub-pixel refinement of an integer disparity map: fits a parabola through
 * the matching scores at bestDisp-1, bestDisp, bestDisp+1 and takes its
 * vertex; implausible vertices (outside the one-disparity neighbourhood)
 * are marked invalid.
 *
 * NOTE(review): x,y are unguarded — relies on the exact-fit launch computed
 * by DenseStereoSubpixelRefine (InitDimFromOutputImage) — confirm.
 * MinDisparity is a constant defined elsewhere in this file.
 */
template<typename TDo, typename TDi, typename TI, typename Score>
__global__ void KernDenseStereoSubpixelRefine(
Image<TDo> dDispOut, const Image<TDi> dDisp, const Image<TI> dCamLeft, const Image<TI> dCamRight
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int bestDisp = dDisp(x,y);
// Ignore things at infinity
if(bestDisp < MinDisparity) {
dDispOut(x,y) = InvalidValue<TDo>::Value();
return;
}
// Fit parabola to neighbours
const float d1 = bestDisp+1;
const float d2 = bestDisp;
const float d3 = bestDisp-1;
const float s1 = Score::Score(dCamLeft, x,y, dCamRight, x-d1,y);
const float s2 = Score::Score(dCamLeft, x,y, dCamRight, x-d2,y);
const float s3 = Score::Score(dCamLeft, x,y, dCamRight, x-d3,y);
// Coefficients of parabola through (d1,s1),(d2,s2),(d3,s3)
const float denom = (d1 - d2)*(d1 - d3)*(d2 - d3);
const float A = (d3 * (s2 - s1) + d2 * (s1 - s3) + d1 * (s3 - s2)) / denom;
const float B = (d3*d3 * (s1 - s2) + d2*d2 * (s3 - s1) + d1*d1 * (s2 - s3)) / denom;
// const float C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom;
// Minima of parabola
const float newDisp = -B / (2*A);
// Check that minima is sensible. Otherwise assume bad data.
if( d3 < newDisp && newDisp < d1 ) {
dDispOut(x,y) = newDisp;
}else{
// dDisp(x,y) = bestDisp / maxDisp;
dDispOut(x,y) = InvalidValue<TDo>::Value();
}
}
// Host wrapper: exact-fit 2D launch of the sub-pixel refinement kernel.
void DenseStereoSubpixelRefine(
    Image<float> dDispOut, const Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight
) {
    dim3 block;
    dim3 grid;
    // NOTE(review): dims are derived from the input dDisp rather than the
    // output dDispOut; fine while both share the same size — confirm.
    InitDimFromOutputImage(block, grid, dDisp);
    KernDenseStereoSubpixelRefine<float, unsigned char, unsigned char, DefaultSafeScoreType><<<grid, block>>>(dDispOut, dDisp, dCamLeft, dCamRight);
}
//////////////////////////////////////////////////////
// Upgrade disparity image to vertex array
//////////////////////////////////////////////////////
/**
 * Converts a disparity image into a vertex buffer of 3D points by
 * back-projecting each pixel with the stereo baseline and intrinsics.
 */
__global__ void KernDisparityImageToVbo(
    Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0
) {
    const int u = blockIdx.x*blockDim.x + threadIdx.x;
    const int v = blockIdx.y*blockDim.y + threadIdx.y;
    // Guard the write: grids rarely divide the image exactly, and the guard
    // makes the kernel safe under rounded-up launches as well.
    if( dVbo.InBounds(u,v) ) {
        dVbo(u,v) = DepthFromDisparity(u,v, dDisp(u,v), baseline, fu, fv, u0, v0, MinDisparity);
    }
}
// Host wrapper: exact-fit 2D launch of the disparity-to-vertex-buffer kernel.
void DisparityImageToVbo(Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0)
{
    dim3 block;
    dim3 grid;
    InitDimFromOutputImage(block, grid, dVbo);
    KernDisparityImageToVbo<<<grid, block>>>(dVbo, dDisp, baseline, fu, fv, u0, v0);
}
//////////////////////////////////////////////////////
// Cost Volume
//////////////////////////////////////////////////////
// Resets every cost-volume element to an empty accumulator (sum = 0, n = 0).
void CostVolumeZero(Volume<CostVolElem> costvol )
{
    CostVolElem empty;
    empty.sum = 0;
    empty.n = 0;
#ifndef _MSC_VER
    costvol.Fill(empty);
#else
    // Cannot use thrust::fill on windows with aligned structure.
    throw std::runtime_error("Not implemented on MSVC.");
#endif
}
//////////////////////////////////////////////////////
/**
 * Initialises the cost volume from a rectified stereo pair: element (u,v,d)
 * receives the normalised patch score between left pixel (u,v) and right
 * pixel (u-d,v), with a sample count of 1.
 *
 * NOTE(review): u/v/d have no upper-bounds guard — relies on the caller's
 * grid exactly covering the volume (CostVolumeFromStereo floors its grid
 * dims, so no thread runs past the volume, but any tail is skipped).
 */
template<typename TD, typename TI, typename Score>
__global__ void KernCostVolumeFromStereo(
Volume<CostVolElem> dvol, Image<TI> dimgl, Image<TI> dimgr
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
// Only score disparities whose right-image patch centre stays at least the
// patch radius away from the left border.
if( u-d >= (int)Score::rad) {
CostVolElem elem;
elem.sum = Score::Score(dimgl, u,v, dimgr, u-d, v) / Score::area;
elem.n = 1;
dvol(u,v,d) = elem;
}
}
// Host wrapper for KernCostVolumeFromStereo.
// NOTE(review): the grid uses floor division, so any tail of the volume whose
// extent is not a multiple of 8 in w/h/d is left unprocessed (contrast with
// the ceil division used by CostVolumeFromStereoTruncatedAbsAndGrad). The
// kernel has no bounds guard, so switching to ceil here would require adding
// one there first.
void CostVolumeFromStereo(Volume<CostVolElem> dvol, Image<unsigned char> dimgl, Image<unsigned char> dimgr )
{
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
KernCostVolumeFromStereo<unsigned char, unsigned char, DefaultSafeScoreType><<<gridDim,blockDim>>>(dvol,dimgl,dimgr);
}
//////////////////////////////////////////////////////
/**
 * Accumulates photometric evidence from a second camera view into the cost
 * volume: each voxel (u,v,d) is back-projected to a 3D point at depth
 * fu*baseline/d, reprojected into the comparison image via KT_cv, and the
 * resulting patch score folded into the voxel's running sum/count.
 *
 * NOTE(review): d == 0 divides by zero (infinite depth); presumably the
 * KPc.z > 0 / InBounds test rejects the resulting projection — confirm.
 * No u/v/d bounds guard: relies on the wrapper's floor-divided grid.
 */
template<typename TI, typename Score>
__global__ void KernAddToCostVolume(
Volume<CostVolElem> dvol, const Image<TI> dimgv,
const Image<TI> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline
){
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
// Back-project pixel (u,v) at the depth implied by disparity d.
float3 Pv;
Pv.z = fu * baseline / d;
Pv.x = Pv.z * (u-u0) / fu;
Pv.y = Pv.z * (v-v0) / fv;
// Transform into the comparison camera and dehomogenise.
const float3 KPc = KT_cv * Pv;
const float2 pc = dn(KPc);
// Only accumulate when the point projects in front of the camera and at
// least 5 pixels inside the comparison image.
if( KPc.z > 0 && dimgc.InBounds(pc.x, pc.y,5) ) {
// vol(u,v,d) = 1.0f;
const float score = Score::Score(dimgv, u,v, dimgc, pc.x, pc.y) / (float)(Score::area);
// const float score = (dimgv(u,v) - dimgc.template GetBilinear<float>(pc)) / 255.0f;
CostVolElem elem = dvol(u,v,d);
elem.sum += score;
elem.n += 1;
dvol(u,v,d) = elem;
}
}
// Host wrapper for KernAddToCostVolume.
// NOTE(review): the 'levels' parameter is accepted but never used, and the
// grid is computed with floor division, so volume tails not divisible by 8
// are skipped (consistent with the unguarded kernel).
void CostVolumeAdd(Volume<CostVolElem> dvol, const Image<unsigned char> dimgv,
const Image<unsigned char> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline, int levels
) {
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
KernAddToCostVolume<unsigned char, SANDPatchScore<float,DefaultRad,ImgAccessBilinearClamped<float> > ><<<gridDim,blockDim>>>(dvol,dimgv,dimgc, KT_cv, fu,fv,u0,v0, baseline);
}
//////////////////////////////////////////////////////
/**
 * Per-pixel winner-takes-all minimum over the mean cost (sum/n) of every
 * disparity plane of an accumulated cost volume.
 *
 * Fix: added the x/y bounds guard. The host wrapper launches with
 * InitDimFromOutputImageOver, which rounds the grid *up*, so edge threads
 * exist past the image and previously wrote out of bounds (compare the guard
 * already present in KernCostVolMinimumSubpix).
 */
template<typename Tdisp>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<CostVolElem> vol)
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    if( x < disp.w && y < disp.h ) {
        Tdisp bestd = 0;
        float bestc = 1E30;
        const unsigned maxDisp = vol.d;
        // Note: elements with n == 0 yield inf/NaN mean cost and never win
        // the (c < bestc) comparison.
        for(unsigned d=0; d < maxDisp; ++d) {
            const CostVolElem elem = vol(x,y,d);
            const float c = (elem.sum / elem.n);
            if(c < bestc) {
                bestc = c;
                bestd = d;
            }
        }
        disp(x,y) = bestd;
    }
}
// Host wrapper: over-sized 2D launch of the cost-volume minimum kernel.
void CostVolMinimum(Image<float> disp, Volume<CostVolElem> vol)
{
    dim3 block;
    dim3 grid;
    InitDimFromOutputImageOver(block, grid, disp);
    KernCostVolMinimum<float><<<grid, block>>>(disp, vol);
}
//////////////////////////////////////////////////////
/**
 * Renders one x/disparity slice of an accumulated cost volume as normalised
 * grey values (mean cost / 255); positions outside the slice are written as
 * the invalid-float marker.
 * NOTE(review): elements with n == 0 divide by zero here (inf/NaN output) —
 * confirm the slice is always fully accumulated before visualisation.
 */
__global__ void KernCostVolumeCrossSection(
Image<float> dScore, Image<CostVolElem> dCostVolSlice
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int d = blockIdx.y*blockDim.y + threadIdx.y;
if( dCostVolSlice.InBounds(x,d) )
{
CostVolElem elem = dCostVolSlice(x,d);
const float score = (elem.sum / elem.n) / 255.0f;
dScore(x,d) = score;
}else{
dScore(x,d) = InvalidValue<float>::Value();
}
}
// Host wrapper: passes the y-th x/disparity slice of the volume to the
// cross-section visualisation kernel with an exact-fit launch over dScore.
void CostVolumeCrossSection(
    Image<float> dScore, Volume<CostVolElem> dCostVol, int y
) {
    dim3 block;
    dim3 grid;
    InitDimFromOutputImage(block, grid, dScore);
    KernCostVolumeCrossSection<<<grid, block>>>(dScore, dCostVol.ImageXZ(y));
}
//////////////////////////////////////////////////////
/**
 * Filters a disparity image by gradient magnitude: copies dIn(x,y) to
 * dOut(x,y) where the squared central-difference gradient is below
 * threshold, and writes -1 (invalid) elsewhere.
 *
 * Fix: the gradient is now measured on the *input* image dIn. The previous
 * revision read central differences from dOut — the very buffer this kernel
 * is concurrently writing — which is a data race across threads and, on
 * first use, reads uninitialised memory.
 */
template<typename To, typename Ti>
__global__ void KernFilterDispGrad(Image<To> dOut, Image<Ti> dIn, float threshold )
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    const float dx = dIn.template GetCentralDiffDx<float>(x,y);
    const float dy = dIn.template GetCentralDiffDy<float>(x,y);
    const bool valid = dx*dx + dy*dy < threshold;
    dOut(x,y) = valid ? dIn(x,y) : -1;
}
// Host wrapper: launches the gradient filter with fixed 16x16 tiles covering
// dOut exactly.
void FilterDispGrad(
    Image<float> dOut, Image<float> dIn, float threshold
) {
    dim3 block;
    dim3 grid;
    InitDimFromOutputImage(block, grid, dOut, 16, 16);
    KernFilterDispGrad<float, float><<<grid, block>>>(dOut, dIn, threshold);
}
//////////////////////////////////////////////////////
// Cost volume with truncated grad and abs. diff. score
// Fast Cost-Volume Filtering for Visual Correspondence and Beyond
// Christoph Rhemann, Asmaa Hosni, Michael Bleyer, Carsten Rother, Margrit Gelautz
//////////////////////////////////////////////////////
/**
 * Builds a cost volume from a rectified pair using the truncated absolute
 * difference + truncated gradient difference score of Rhemann et al.:
 *   cost = (1-alpha)*min(|I_r - I_l|, r1) + alpha*min(|dI_r - dI_l|, r2)
 * Voxels whose matching column r = u + sd*d falls outside the right image
 * receive the fully truncated (maximum) cost.
 *
 * Fixes:
 *  - The previous revision unconditionally overwrote alpha = 0 and r1 = 1e37
 *    at the top of the kernel (debug leftovers), disabling truncation and the
 *    gradient term and making the caller-supplied parameters dead.
 *  - Added a volume bounds guard: the host wrapper rounds the grid *up*
 *    (ceil division), so edge threads previously wrote out of bounds.
 *  - fabsf instead of fabs to stay in single precision.
 */
template<typename Tout, typename Tin>
__global__ void KernCostVolumeFromStereoTruncatedAbsAndGrad(
    Volume<Tout> dvol, Image<Tin> dimgl, Image<Tin> dimgr, float sd,
    float alpha, float r1, float r2
) {
    const int u = blockIdx.x*blockDim.x + threadIdx.x;
    const int v = blockIdx.y*blockDim.y + threadIdx.y;
    const int d = blockIdx.z*blockDim.z + threadIdx.z;
    if( u < dvol.w && v < dvol.h && d < dvol.d ) {
        const int r = u + sd*d;
        if( 0 <= r && r < dimgr.w ) {
            const float absI = fabsf( (float)dimgr(r,v) - (float)dimgl(u,v));
            const float absGrad = fabsf( dimgr.template GetCentralDiffDx<float>(r,v) - dimgl.template GetCentralDiffDx<float>(u,v) );
            const Tout cost = (1.0f-alpha)*min(absI,r1) + alpha*min(absGrad,r2);
            dvol(u,v,d) = cost;
        }else{
            // Out-of-image match: assign the fully truncated cost.
            dvol(u,v,d) = (1.0f-alpha)*r1 + alpha*r2;
        }
    }
}
// Host wrapper: 8x8x8 thread blocks with the grid rounded up so the whole
// volume is covered.
void CostVolumeFromStereoTruncatedAbsAndGrad(Volume<float> dvol, Image<float> dimgl, Image<float> dimgr, float sd, float alpha, float r1, float r2 )
{
    const dim3 block(8, 8, 8);
    const dim3 grid(ceil(dvol.w / (float)block.x),
                    ceil(dvol.h / (float)block.y),
                    ceil(dvol.d / (float)block.z));
    KernCostVolumeFromStereoTruncatedAbsAndGrad<float, float><<<grid, block>>>(dvol, dimgl, dimgr, sd, alpha, r1, r2);
}
}
|
dad30703cd8d3e083857e330fee0ac3dbeb51196.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdiocu.h>
#include <ctypecu.h>
#include <assert.h>
// Device-side self-test of the ctypecu character-classification shims (HIP
// build). Runs on a single thread; a failing device assert() surfaces at the
// next synchronisation in the host wrapper below.
static __global__ void g_ctype_test1() {
printf("ctype_test1\n");
//fprintf_(stdout, "ctype_test1\n");
//// ISCTYPE ////
//extern __forceinline__ __device__ int isctype_(int c, int type);
bool _0 = isctype('a', 0x02); assert(_0);
//// ISALNUM, ISALPHA, ISCNTRL, ISDIGIT, ISLOWER, ISGRAPH, ISPRINT, ISPUNCT, ISSPACE, ISUPPER, ISXDIGIT ////
//extern __forceinline__ __device__ int isalnum_(int c);
//extern __forceinline__ __device__ int isalpha_(int c);
//extern __forceinline__ __device__ int iscntrl_(int c);
//extern __forceinline__ __device__ int isdigit_(int c);
//extern __forceinline__ __device__ int islower_(int c);
//extern __forceinline__ __device__ int isgraph_(int c);
//extern __forceinline__ __device__ int isprint_(int c);
//extern __forceinline__ __device__ int ispunct_(int c);
//extern __forceinline__ __device__ int isspace_(int c);
//extern __forceinline__ __device__ int isupper_(int c);
//extern __forceinline__ __device__ int isxdigit_(int c);
bool a0 = isalnum('a'); bool a0n = isalnum('1'); assert(a0 && a0n);
bool a1 = isalpha('a'); bool a1n = isalpha('A'); assert(a1 && a1n);
bool a2 = iscntrl('a'); bool a2n = iscntrl('A'); assert(!a2 && !a2n);
bool a3 = isdigit('a'); bool a3n = isdigit('1'); assert(!a3 && a3n);
bool a4 = islower('a'); bool a4n = islower('A'); assert(a4 && !a4n);
// NOTE(review): standard isgraph('a') / isgraph('A') are true, but this
// asserts both false — either ctypecu's isgraph intentionally differs from
// the C standard or the assert is inverted. Verify against ctypecu.
bool a5 = isgraph('a'); bool a5n = isgraph('A'); assert(!a5 && !a5n);
bool a6 = isprint('a'); bool a6n = isprint('A'); assert(a6 && a6n);
bool a7 = ispunct('a'); bool a7n = ispunct('A'); assert(!a7 && !a7n);
bool a8 = isspace('a'); bool a8n = isspace(' '); assert(!a8 && a8n);
bool a9 = isupper('a'); bool a9n = isupper('A'); assert(!a9 && a9n);
bool aA = isxdigit('a'); bool aAn = isxdigit('A'); assert(aA && aAn);
//// TOLOWER, TOUPPER, _TOLOWER, _TOUPPER ////
//extern __forceinline__ __device__ int tolower_(int c);
//extern __forceinline__ __device__ int toupper_(int c);
////existing: #define _tolower(c)
////existing: #define _toupper(c)
char b0 = tolower('a'); char b0n = tolower('A'); assert(b0 == 'a' && b0n == 'a');
char b1 = toupper('a'); char b1n = toupper('A'); assert(b1 == 'A' && b1n == 'A');
// _toupper/_tolower are only defined for arguments of the opposite case,
// hence the inequality checks on the already-converted inputs.
char b2 = _toupper('a'); char b2n = _toupper('A'); assert(b2 == 'A' && b2n != 'A');
char b3 = _tolower('a'); char b3n = _tolower('A'); assert(b3 != 'a' && b3n == 'a');
//// ISBLANK, ISIDCHAR ////
//extern __forceinline__ __device__ int isblank_(int c);
//extern __forceinline__ __device__ int isidchar_(int c);
bool c0 = isblank(' '); bool c0n = isblank('A'); assert(c0 && !c0n);
bool c1 = isidchar('a'); bool c1n = isidchar('A'); assert(c1 && c1n);
//bool d0 = ispoweroftwo(2); bool d0n = ispoweroftwo(3); assert(d0 && !d0n);
//bool d1 = isalpha2('a'); bool d1n = isalpha2('A'); assert(d1 && d1n);
}
// Launches the ctype device self-test on a single thread and waits for it;
// a device-side assert failure is reported by hipDeviceSynchronize().
hipError_t ctype_test1()
{
    hipLaunchKernelGGL(g_ctype_test1, dim3(1), dim3(1), 0, 0);
    return hipDeviceSynchronize();
}
| dad30703cd8d3e083857e330fee0ac3dbeb51196.cu | #include <stdiocu.h>
#include <ctypecu.h>
#include <assert.h>
// Device-side self-test of the ctypecu character-classification shims (CUDA
// build). Runs on a single thread; a failing device assert() surfaces at the
// next synchronisation in the host wrapper below.
static __global__ void g_ctype_test1() {
printf("ctype_test1\n");
//fprintf_(stdout, "ctype_test1\n");
//// ISCTYPE ////
//extern __forceinline__ __device__ int isctype_(int c, int type);
bool _0 = isctype('a', 0x02); assert(_0);
//// ISALNUM, ISALPHA, ISCNTRL, ISDIGIT, ISLOWER, ISGRAPH, ISPRINT, ISPUNCT, ISSPACE, ISUPPER, ISXDIGIT ////
//extern __forceinline__ __device__ int isalnum_(int c);
//extern __forceinline__ __device__ int isalpha_(int c);
//extern __forceinline__ __device__ int iscntrl_(int c);
//extern __forceinline__ __device__ int isdigit_(int c);
//extern __forceinline__ __device__ int islower_(int c);
//extern __forceinline__ __device__ int isgraph_(int c);
//extern __forceinline__ __device__ int isprint_(int c);
//extern __forceinline__ __device__ int ispunct_(int c);
//extern __forceinline__ __device__ int isspace_(int c);
//extern __forceinline__ __device__ int isupper_(int c);
//extern __forceinline__ __device__ int isxdigit_(int c);
bool a0 = isalnum('a'); bool a0n = isalnum('1'); assert(a0 && a0n);
bool a1 = isalpha('a'); bool a1n = isalpha('A'); assert(a1 && a1n);
bool a2 = iscntrl('a'); bool a2n = iscntrl('A'); assert(!a2 && !a2n);
bool a3 = isdigit('a'); bool a3n = isdigit('1'); assert(!a3 && a3n);
bool a4 = islower('a'); bool a4n = islower('A'); assert(a4 && !a4n);
// NOTE(review): standard isgraph('a') / isgraph('A') are true, but this
// asserts both false — either ctypecu's isgraph intentionally differs from
// the C standard or the assert is inverted. Verify against ctypecu.
bool a5 = isgraph('a'); bool a5n = isgraph('A'); assert(!a5 && !a5n);
bool a6 = isprint('a'); bool a6n = isprint('A'); assert(a6 && a6n);
bool a7 = ispunct('a'); bool a7n = ispunct('A'); assert(!a7 && !a7n);
bool a8 = isspace('a'); bool a8n = isspace(' '); assert(!a8 && a8n);
bool a9 = isupper('a'); bool a9n = isupper('A'); assert(!a9 && a9n);
bool aA = isxdigit('a'); bool aAn = isxdigit('A'); assert(aA && aAn);
//// TOLOWER, TOUPPER, _TOLOWER, _TOUPPER ////
//extern __forceinline__ __device__ int tolower_(int c);
//extern __forceinline__ __device__ int toupper_(int c);
////existing: #define _tolower(c)
////existing: #define _toupper(c)
char b0 = tolower('a'); char b0n = tolower('A'); assert(b0 == 'a' && b0n == 'a');
char b1 = toupper('a'); char b1n = toupper('A'); assert(b1 == 'A' && b1n == 'A');
// _toupper/_tolower are only defined for arguments of the opposite case,
// hence the inequality checks on the already-converted inputs.
char b2 = _toupper('a'); char b2n = _toupper('A'); assert(b2 == 'A' && b2n != 'A');
char b3 = _tolower('a'); char b3n = _tolower('A'); assert(b3 != 'a' && b3n == 'a');
//// ISBLANK, ISIDCHAR ////
//extern __forceinline__ __device__ int isblank_(int c);
//extern __forceinline__ __device__ int isidchar_(int c);
bool c0 = isblank(' '); bool c0n = isblank('A'); assert(c0 && !c0n);
bool c1 = isidchar('a'); bool c1n = isidchar('A'); assert(c1 && c1n);
//bool d0 = ispoweroftwo(2); bool d0n = ispoweroftwo(3); assert(d0 && !d0n);
//bool d1 = isalpha2('a'); bool d1n = isalpha2('A'); assert(d1 && d1n);
}
// Launches the ctype device self-test on a single thread and waits for it;
// a device-side assert failure is reported by cudaDeviceSynchronize().
cudaError_t ctype_test1()
{
    g_ctype_test1<<<1, 1>>>();
    return cudaDeviceSynchronize();
}
|
8cccbd2e9f93325fa30edfb5610b2845f9d3b918.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/parquet/parquet_gpu.hpp>
#include <io/utilities/block_utils.cuh>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <hipcub/hipcub.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Per-block working state for chunk dictionary construction.
// One instance lives in shared memory per thread block (see gpuBuildChunkDictionaries).
struct dict_state_s {
  uint32_t row_cnt;             // rows of the chunk folded into the dictionary so far
  PageFragment *cur_fragment;   // fragment currently being processed
  uint32_t *hashmap;            // per-chunk hash table (heads of row chains) in dev scratch
  uint32_t total_dict_entries;  //!< Total number of entries in dictionary
  uint32_t dictionary_size;     //!< Total dictionary size in bytes
  uint32_t num_dict_entries;    //!< Dictionary entries in current fragment to add
  uint32_t frag_dict_size;      // byte size of the current fragment's unique values
  EncColumnChunk ck;            // local copy of the chunk descriptor
  EncColumnDesc col;            // local copy of the column descriptor
  PageFragment frag;            // local copy of the current fragment
  volatile uint32_t scratch_red[32];           // reduction scratch (NOTE(review): appears unused in this file)
  uint16_t frag_dict[max_page_fragment_size];  // fragment-relative rows of the fragment's dict values
};
/**
 * @brief Computes a 16-bit dictionary hash of a 32-bit value
 *
 * Folds the upper half into the lower half; result is in [0, 0xffff].
 */
inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; }
// 16-bit hash of a 64-bit value: fold the high word down, then reuse the
// 32-bit folding above.
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
  return uint32_hash16((uint32_t)(v + (v >> 32)));
}
// 16-bit hash of a byte string of length `len`.
// Reads the data one aligned 32-bit word at a time, re-assembling unaligned
// words with funnel shifts, and mixes each word into an accumulator seeded
// with the length via rotate-left-5-and-add.
// NOTE(review): the aligned reads may touch up to 3 bytes before the string
// and past its end (within the containing 4-byte words); presumably safe for
// the buffers this is used on — confirm with the allocator.
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
  uint32_t hash = len;
  if (len > 0) {
    uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);  // misalignment of p
    const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
    uint32_t ofs = align_p * 8;  // bit offset of the string inside *p32
    uint32_t v;
    while (len > 4) {
      v = *p32++;
      if (ofs) { v = __funnelshift_r(v, *p32, ofs); }  // stitch two words into one
      hash = __funnelshift_l(hash, hash, 5) + v;       // rotl(hash, 5) + v
      len -= 4;
    }
    // Last 1..4 bytes: mask off anything beyond the string before mixing.
    v = *p32;
    if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? p32[1] : 0, ofs); }
    v &= ((2 << (len * 8 - 1)) - 1);
    hash = __funnelshift_l(hash, hash, 5) + v;
  }
  return uint32_hash16(hash);
}
/**
 * @brief Fetch a page fragment and its dictionary entries in row-ascending order
 *
 * Caches the fragment's dictionary rows (as 16-bit fragment-relative offsets)
 * in shared memory, then zeroes the corresponding dict_data slots so they can
 * be reused as hash-chain links. Must be called by all threads of the block.
 *
 * @param[in,out] s dictionary state
 * @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after
 *fetching)
 * @param[in] frag_start_row row position of current fragment
 * @param[in] t thread id
 */
__device__ void FetchDictionaryFragment(dict_state_s *s,
                                        uint32_t *dict_data,
                                        uint32_t frag_start_row,
                                        uint32_t t)
{
  if (t == 0) s->frag = *s->cur_fragment;  // one thread snapshots the fragment
  __syncthreads();
  // Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
  // It's easiest to do this here since we're only dealing with values all within a 5K-row window
  for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {  // 1024 == blockDim.x
    uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
    s->frag_dict[i] = r;
  }
  __syncthreads();
  for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
    uint32_t r = s->frag_dict[i];
    dict_data[frag_start_row + r] = 0;  // clear so the slot can serve as a chain link
  }
  __syncthreads();
}
/// Generate dictionary indices in ascending row order
/// On entry, unique rows have dict_index[row] == row and duplicates have
/// bit31 set with the low bits pointing at a representative. This pass uses a
/// block-wide exclusive scan to assign compact, insertion-ordered indices to
/// the unique values and then resolves every duplicate to its final index.
template <int block_size>
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
  using block_scan = hipcub::BlockScan<uint32_t, block_size>;
  __shared__ typename block_scan::TempStorage temp_storage;
  uint32_t *dict_index = s->col.dict_index;
  uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
  uint32_t num_dict_entries = 0;  // running count across 1024-row batches
  for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
    uint32_t row = s->ck.start_row + i + t;
    uint32_t is_valid =
      (i + t < s->row_cnt && row < s->col.num_rows) ? s->col.leaf_column->is_valid(row) : 0;
    uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
    uint32_t is_unique =
      (is_valid &&
       dict_idx ==
         row);  // Any value that doesn't have bit31 set should have dict_idx=row at this point
    uint32_t block_num_dict_entries;
    uint32_t pos;
    // Exclusive prefix sum over the "unique" flags gives each unique value its slot.
    block_scan(temp_storage).ExclusiveSum(is_unique, pos, block_num_dict_entries);
    pos += num_dict_entries;
    num_dict_entries += block_num_dict_entries;
    if (is_valid && is_unique) {
      dict_data[pos] = row;
      dict_index[row] = pos;
    }
    __syncthreads();
    if (is_valid && !is_unique) {
      // NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for
      // final dictionary duplicate elimination and once for re-ordering) (If something went wrong
      // building the dictionary, it will likely hang or crash right here)
      do {
        dict_idx = dict_index[dict_idx & 0x7fffffff];
      } while (dict_idx > 0x7fffffff);
      dict_index[row] = dict_idx;
    }
  }
}
// blockDim(1024, 1, 1)
// One thread block builds the dictionary of one column chunk. Fragments are
// folded in one at a time: each candidate value is looked up in a per-chunk
// hash table in global memory (slot value == row+1, collisions chained through
// dict_data), duplicates are linked to their first occurrence with bit31 set,
// and the loop stops early once the dictionary would exceed 64K entries or
// 512KB of values. Finally GenerateDictionaryIndices rewrites everything into
// insertion (row-ascending) order.
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
  gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
  __shared__ __align__(8) dict_state_s state_g;
  using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
  __shared__ typename block_reduce::TempStorage temp_storage;
  dict_state_s *const s = &state_g;
  uint32_t t = threadIdx.x;
  uint32_t dtype, dtype_len, dtype_len_in;
  if (t == 0) s->ck = chunks[blockIdx.x];
  __syncthreads();
  if (!s->ck.has_dictionary) { return; }
  if (t == 0) s->col = *s->ck.col_desc;
  __syncthreads();
  if (!t) {
    // Each dictionary owns a (1 << kDictHashBits)-slot hash table in scratch,
    // pre-zeroed by the launcher (0 == empty slot).
    s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
    s->row_cnt = 0;
    s->cur_fragment = s->ck.fragments;
    s->total_dict_entries = 0;
    s->dictionary_size = 0;
    s->ck.num_dict_fragments = 0;
  }
  // On-page (physical) size vs in-memory element size for the column type.
  dtype = s->col.physical_type;
  dtype_len = (dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
  if (dtype == INT32) {
    dtype_len_in = GetDtypeLogicalLen(s->col.leaf_column);
  } else if (dtype == INT96) {
    dtype_len_in = 8;
  } else {
    dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
  }
  __syncthreads();
  while (s->row_cnt < s->ck.num_rows) {
    uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
    FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
    __syncthreads();
    num_dict_entries = s->frag.num_dict_vals;
    if (!t) {
      s->num_dict_entries = 0;
      s->frag_dict_size = 0;
    }
    // Process the fragment's candidates 1024 (== blockDim.x) at a time.
    for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
      bool is_valid = (i + t < num_dict_entries);
      uint32_t len = 0;
      uint32_t is_dupe = 0;
      uint32_t row, hash, next, *next_addr;
      uint32_t new_dict_entries;
      if (is_valid) {
        row = frag_start_row + s->frag_dict[i + t];
        len = dtype_len;
        if (dtype == BYTE_ARRAY) {
          auto str1 = s->col.leaf_column->element<string_view>(row);
          len += str1.size_bytes();
          hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(str1.data()), str1.size_bytes());
          // Walk the list of rows with the same hash
          next_addr = &s->hashmap[hash];
          while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
            auto const current = next - 1;
            auto str2 = s->col.leaf_column->element<string_view>(current);
            if (str1 == str2) {
              is_dupe = 1;
              break;
            }
            next_addr = &s->col.dict_data[next - 1];  // follow the collision chain
          }
        } else {
          uint64_t val;
          if (dtype_len_in == 8) {
            val = s->col.leaf_column->element<uint64_t>(row);
            hash = uint64_hash16(val);
          } else {
            val = (dtype_len_in == 4)
                    ? s->col.leaf_column->element<uint32_t>(row)
                    : (dtype_len_in == 2) ? s->col.leaf_column->element<uint16_t>(row)
                                          : s->col.leaf_column->element<uint8_t>(row);
            hash = uint32_hash16(val);
          }
          // Walk the list of rows with the same hash
          next_addr = &s->hashmap[hash];
          while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
            auto const current = next - 1;
            uint64_t val2 = (dtype_len_in == 8)
                              ? s->col.leaf_column->element<uint64_t>(current)
                              : (dtype_len_in == 4)
                                  ? s->col.leaf_column->element<uint32_t>(current)
                                  : (dtype_len_in == 2)
                                      ? s->col.leaf_column->element<uint16_t>(current)
                                      : s->col.leaf_column->element<uint8_t>(current);
            if (val2 == val) {
              is_dupe = 1;
              break;
            }
            next_addr = &s->col.dict_data[next - 1];
          }
        }
      }
      // Count the non-duplicate entries
      frag_dict_size = block_reduce(temp_storage).Sum((is_valid && !is_dupe) ? len : 0);
      new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
      if (t == 0) {
        s->frag_dict_size += frag_dict_size;
        s->num_dict_entries += new_dict_entries;
      }
      if (is_valid) {
        if (!is_dupe) {
          s->col.dict_index[row] = row;
        } else {
          // bit31 marks a duplicate; the low bits point at the representative row
          s->col.dict_index[row] = (next - 1) | (1u << 31);
        }
      }
      __syncthreads();
      // At this point, the dictionary order is non-deterministic, and we want insertion order
      // Make sure that the non-duplicate entry corresponds to the lower row number
      // (The entry in dict_data (next-1) used for duplicate elimination does not need
      // to be the lowest row number)
      bool reorder_check = (is_valid && is_dupe && next - 1 > row);
      if (reorder_check) {
        next = s->col.dict_index[next - 1];
        while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; }
      }
      if (__syncthreads_or(reorder_check)) {
        // atomicMin elects the lowest contending row as the new representative
        if (reorder_check) { atomicMin(&s->col.dict_index[next], row); }
        __syncthreads();
        if (reorder_check && s->col.dict_index[next] == row) {
          s->col.dict_index[next] = row | (1u << 31);
          s->col.dict_index[row] = row;
        }
        __syncthreads();
      }
    }
    __syncthreads();
    num_dict_entries = s->num_dict_entries;
    frag_dict_size = s->frag_dict_size;
    // Stop before overflowing the dictionary limits (64K entries / 512KB data).
    if (s->total_dict_entries + num_dict_entries > 65536 ||
        (s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) {
      break;
    }
    __syncthreads();
    if (!t) {
      if (num_dict_entries != s->frag.num_dict_vals) {
        s->cur_fragment->num_dict_vals = num_dict_entries;
      }
      if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; }
      s->total_dict_entries += num_dict_entries;
      s->dictionary_size += frag_dict_size;
      s->row_cnt += s->frag.num_rows;
      s->cur_fragment++;
      s->ck.num_dict_fragments++;
    }
    __syncthreads();
  }
  __syncthreads();
  // Rewrite dict_data/dict_index into ascending-row (insertion) order.
  GenerateDictionaryIndices<block_size>(s, t);
  if (!t) {
    chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
    chunks[blockIdx.x].dictionary_size = s->dictionary_size;
    chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
  }
}
/**
 * @brief Launches kernel for building chunk dictionaries
 *
 * Zeroes the scratch hash tables first (the kernel relies on 0 meaning
 * "empty slot"), then launches one 1024-thread block per column chunk.
 * Asynchronous with respect to the host.
 *
 * @param[in,out] chunks Column chunks
 * @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
 * @param[in] scratch_size Total size of dev_scratch in bytes; 0 means no dictionaries
 * @param[in] num_chunks Number of column chunks
 * @param[in] stream CUDA stream to use, default 0
 */
void BuildChunkDictionaries(EncColumnChunk *chunks,
                            uint32_t *dev_scratch,
                            size_t scratch_size,
                            uint32_t num_chunks,
                            rmm::cuda_stream_view stream)
{
  if (num_chunks > 0 && scratch_size > 0) {  // zero scratch size implies no dictionaries
    CUDA_TRY(hipMemsetAsync(dev_scratch, 0, scratch_size, stream.value()));
    hipLaunchKernelGGL(( gpuBuildChunkDictionaries<1024>), dim3(num_chunks), dim3(1024), 0, stream.value(), chunks, dev_scratch);
  }
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| 8cccbd2e9f93325fa30edfb5610b2845f9d3b918.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/parquet/parquet_gpu.hpp>
#include <io/utilities/block_utils.cuh>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cub/cub.cuh>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Per-block working state for chunk dictionary construction.
// One instance lives in shared memory per thread block (see gpuBuildChunkDictionaries).
struct dict_state_s {
  uint32_t row_cnt;             // rows of the chunk folded into the dictionary so far
  PageFragment *cur_fragment;   // fragment currently being processed
  uint32_t *hashmap;            // per-chunk hash table (heads of row chains) in dev scratch
  uint32_t total_dict_entries;  //!< Total number of entries in dictionary
  uint32_t dictionary_size;     //!< Total dictionary size in bytes
  uint32_t num_dict_entries;    //!< Dictionary entries in current fragment to add
  uint32_t frag_dict_size;      // byte size of the current fragment's unique values
  EncColumnChunk ck;            // local copy of the chunk descriptor
  EncColumnDesc col;            // local copy of the column descriptor
  PageFragment frag;            // local copy of the current fragment
  volatile uint32_t scratch_red[32];           // reduction scratch (NOTE(review): appears unused in this file)
  uint16_t frag_dict[max_page_fragment_size];  // fragment-relative rows of the fragment's dict values
};
/**
 * @brief Computes a 16-bit dictionary hash of a 32-bit value
 *
 * Folds the upper half into the lower half; result is in [0, 0xffff].
 */
inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; }
// 16-bit hash of a 64-bit value: fold the high word down, then reuse the
// 32-bit folding above.
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
  return uint32_hash16((uint32_t)(v + (v >> 32)));
}
// 16-bit hash of a byte string of length `len`.
// Reads the data one aligned 32-bit word at a time, re-assembling unaligned
// words with funnel shifts, and mixes each word into an accumulator seeded
// with the length via rotate-left-5-and-add.
// NOTE(review): the aligned reads may touch up to 3 bytes before the string
// and past its end (within the containing 4-byte words); presumably safe for
// the buffers this is used on — confirm with the allocator.
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
  uint32_t hash = len;
  if (len > 0) {
    uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);  // misalignment of p
    const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
    uint32_t ofs = align_p * 8;  // bit offset of the string inside *p32
    uint32_t v;
    while (len > 4) {
      v = *p32++;
      if (ofs) { v = __funnelshift_r(v, *p32, ofs); }  // stitch two words into one
      hash = __funnelshift_l(hash, hash, 5) + v;       // rotl(hash, 5) + v
      len -= 4;
    }
    // Last 1..4 bytes: mask off anything beyond the string before mixing.
    v = *p32;
    if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? p32[1] : 0, ofs); }
    v &= ((2 << (len * 8 - 1)) - 1);
    hash = __funnelshift_l(hash, hash, 5) + v;
  }
  return uint32_hash16(hash);
}
/**
 * @brief Fetch a page fragment and its dictionary entries in row-ascending order
 *
 * Caches the fragment's dictionary rows (as 16-bit fragment-relative offsets)
 * in shared memory, then zeroes the corresponding dict_data slots so they can
 * be reused as hash-chain links. Must be called by all threads of the block.
 *
 * @param[in,out] s dictionary state
 * @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after
 *fetching)
 * @param[in] frag_start_row row position of current fragment
 * @param[in] t thread id
 */
__device__ void FetchDictionaryFragment(dict_state_s *s,
                                        uint32_t *dict_data,
                                        uint32_t frag_start_row,
                                        uint32_t t)
{
  if (t == 0) s->frag = *s->cur_fragment;  // one thread snapshots the fragment
  __syncthreads();
  // Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
  // It's easiest to do this here since we're only dealing with values all within a 5K-row window
  for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {  // 1024 == blockDim.x
    uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
    s->frag_dict[i] = r;
  }
  __syncthreads();
  for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
    uint32_t r = s->frag_dict[i];
    dict_data[frag_start_row + r] = 0;  // clear so the slot can serve as a chain link
  }
  __syncthreads();
}
/// Generate dictionary indices in ascending row order
/// On entry, unique rows have dict_index[row] == row and duplicates have
/// bit31 set with the low bits pointing at a representative. This pass uses a
/// block-wide exclusive scan to assign compact, insertion-ordered indices to
/// the unique values and then resolves every duplicate to its final index.
template <int block_size>
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
  using block_scan = cub::BlockScan<uint32_t, block_size>;
  __shared__ typename block_scan::TempStorage temp_storage;
  uint32_t *dict_index = s->col.dict_index;
  uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
  uint32_t num_dict_entries = 0;  // running count across 1024-row batches
  for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
    uint32_t row = s->ck.start_row + i + t;
    uint32_t is_valid =
      (i + t < s->row_cnt && row < s->col.num_rows) ? s->col.leaf_column->is_valid(row) : 0;
    uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
    uint32_t is_unique =
      (is_valid &&
       dict_idx ==
         row);  // Any value that doesn't have bit31 set should have dict_idx=row at this point
    uint32_t block_num_dict_entries;
    uint32_t pos;
    // Exclusive prefix sum over the "unique" flags gives each unique value its slot.
    block_scan(temp_storage).ExclusiveSum(is_unique, pos, block_num_dict_entries);
    pos += num_dict_entries;
    num_dict_entries += block_num_dict_entries;
    if (is_valid && is_unique) {
      dict_data[pos] = row;
      dict_index[row] = pos;
    }
    __syncthreads();
    if (is_valid && !is_unique) {
      // NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for
      // final dictionary duplicate elimination and once for re-ordering) (If something went wrong
      // building the dictionary, it will likely hang or crash right here)
      do {
        dict_idx = dict_index[dict_idx & 0x7fffffff];
      } while (dict_idx > 0x7fffffff);
      dict_index[row] = dict_idx;
    }
  }
}
// blockDim(1024, 1, 1)
// One thread block builds the dictionary of one column chunk. Fragments are
// folded in one at a time: each candidate value is looked up in a per-chunk
// hash table in global memory (slot value == row+1, collisions chained through
// dict_data), duplicates are linked to their first occurrence with bit31 set,
// and the loop stops early once the dictionary would exceed 64K entries or
// 512KB of values. Finally GenerateDictionaryIndices rewrites everything into
// insertion (row-ascending) order.
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
  gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
  __shared__ __align__(8) dict_state_s state_g;
  using block_reduce = cub::BlockReduce<uint32_t, block_size>;
  __shared__ typename block_reduce::TempStorage temp_storage;
  dict_state_s *const s = &state_g;
  uint32_t t = threadIdx.x;
  uint32_t dtype, dtype_len, dtype_len_in;
  if (t == 0) s->ck = chunks[blockIdx.x];
  __syncthreads();
  if (!s->ck.has_dictionary) { return; }
  if (t == 0) s->col = *s->ck.col_desc;
  __syncthreads();
  if (!t) {
    // Each dictionary owns a (1 << kDictHashBits)-slot hash table in scratch,
    // pre-zeroed by the launcher (0 == empty slot).
    s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
    s->row_cnt = 0;
    s->cur_fragment = s->ck.fragments;
    s->total_dict_entries = 0;
    s->dictionary_size = 0;
    s->ck.num_dict_fragments = 0;
  }
  // On-page (physical) size vs in-memory element size for the column type.
  dtype = s->col.physical_type;
  dtype_len = (dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
  if (dtype == INT32) {
    dtype_len_in = GetDtypeLogicalLen(s->col.leaf_column);
  } else if (dtype == INT96) {
    dtype_len_in = 8;
  } else {
    dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
  }
  __syncthreads();
  while (s->row_cnt < s->ck.num_rows) {
    uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
    FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
    __syncthreads();
    num_dict_entries = s->frag.num_dict_vals;
    if (!t) {
      s->num_dict_entries = 0;
      s->frag_dict_size = 0;
    }
    // Process the fragment's candidates 1024 (== blockDim.x) at a time.
    for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
      bool is_valid = (i + t < num_dict_entries);
      uint32_t len = 0;
      uint32_t is_dupe = 0;
      uint32_t row, hash, next, *next_addr;
      uint32_t new_dict_entries;
      if (is_valid) {
        row = frag_start_row + s->frag_dict[i + t];
        len = dtype_len;
        if (dtype == BYTE_ARRAY) {
          auto str1 = s->col.leaf_column->element<string_view>(row);
          len += str1.size_bytes();
          hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(str1.data()), str1.size_bytes());
          // Walk the list of rows with the same hash
          next_addr = &s->hashmap[hash];
          while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
            auto const current = next - 1;
            auto str2 = s->col.leaf_column->element<string_view>(current);
            if (str1 == str2) {
              is_dupe = 1;
              break;
            }
            next_addr = &s->col.dict_data[next - 1];  // follow the collision chain
          }
        } else {
          uint64_t val;
          if (dtype_len_in == 8) {
            val = s->col.leaf_column->element<uint64_t>(row);
            hash = uint64_hash16(val);
          } else {
            val = (dtype_len_in == 4)
                    ? s->col.leaf_column->element<uint32_t>(row)
                    : (dtype_len_in == 2) ? s->col.leaf_column->element<uint16_t>(row)
                                          : s->col.leaf_column->element<uint8_t>(row);
            hash = uint32_hash16(val);
          }
          // Walk the list of rows with the same hash
          next_addr = &s->hashmap[hash];
          while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
            auto const current = next - 1;
            uint64_t val2 = (dtype_len_in == 8)
                              ? s->col.leaf_column->element<uint64_t>(current)
                              : (dtype_len_in == 4)
                                  ? s->col.leaf_column->element<uint32_t>(current)
                                  : (dtype_len_in == 2)
                                      ? s->col.leaf_column->element<uint16_t>(current)
                                      : s->col.leaf_column->element<uint8_t>(current);
            if (val2 == val) {
              is_dupe = 1;
              break;
            }
            next_addr = &s->col.dict_data[next - 1];
          }
        }
      }
      // Count the non-duplicate entries
      frag_dict_size = block_reduce(temp_storage).Sum((is_valid && !is_dupe) ? len : 0);
      new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
      if (t == 0) {
        s->frag_dict_size += frag_dict_size;
        s->num_dict_entries += new_dict_entries;
      }
      if (is_valid) {
        if (!is_dupe) {
          s->col.dict_index[row] = row;
        } else {
          // bit31 marks a duplicate; the low bits point at the representative row
          s->col.dict_index[row] = (next - 1) | (1u << 31);
        }
      }
      __syncthreads();
      // At this point, the dictionary order is non-deterministic, and we want insertion order
      // Make sure that the non-duplicate entry corresponds to the lower row number
      // (The entry in dict_data (next-1) used for duplicate elimination does not need
      // to be the lowest row number)
      bool reorder_check = (is_valid && is_dupe && next - 1 > row);
      if (reorder_check) {
        next = s->col.dict_index[next - 1];
        while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; }
      }
      if (__syncthreads_or(reorder_check)) {
        // atomicMin elects the lowest contending row as the new representative
        if (reorder_check) { atomicMin(&s->col.dict_index[next], row); }
        __syncthreads();
        if (reorder_check && s->col.dict_index[next] == row) {
          s->col.dict_index[next] = row | (1u << 31);
          s->col.dict_index[row] = row;
        }
        __syncthreads();
      }
    }
    __syncthreads();
    num_dict_entries = s->num_dict_entries;
    frag_dict_size = s->frag_dict_size;
    // Stop before overflowing the dictionary limits (64K entries / 512KB data).
    if (s->total_dict_entries + num_dict_entries > 65536 ||
        (s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) {
      break;
    }
    __syncthreads();
    if (!t) {
      if (num_dict_entries != s->frag.num_dict_vals) {
        s->cur_fragment->num_dict_vals = num_dict_entries;
      }
      if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; }
      s->total_dict_entries += num_dict_entries;
      s->dictionary_size += frag_dict_size;
      s->row_cnt += s->frag.num_rows;
      s->cur_fragment++;
      s->ck.num_dict_fragments++;
    }
    __syncthreads();
  }
  __syncthreads();
  // Rewrite dict_data/dict_index into ascending-row (insertion) order.
  GenerateDictionaryIndices<block_size>(s, t);
  if (!t) {
    chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
    chunks[blockIdx.x].dictionary_size = s->dictionary_size;
    chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
  }
}
/**
 * @brief Launches kernel for building chunk dictionaries
 *
 * Zeroes the scratch hash tables first (the kernel relies on 0 meaning
 * "empty slot"), then launches one 1024-thread block per column chunk.
 * Asynchronous with respect to the host.
 *
 * @param[in,out] chunks Column chunks
 * @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
 * @param[in] scratch_size Total size of dev_scratch in bytes; 0 means no dictionaries
 * @param[in] num_chunks Number of column chunks
 * @param[in] stream CUDA stream to use, default 0
 */
void BuildChunkDictionaries(EncColumnChunk *chunks,
                            uint32_t *dev_scratch,
                            size_t scratch_size,
                            uint32_t num_chunks,
                            rmm::cuda_stream_view stream)
{
  if (num_chunks > 0 && scratch_size > 0) {  // zero scratch size implies no dictionaries
    CUDA_TRY(cudaMemsetAsync(dev_scratch, 0, scratch_size, stream.value()));
    gpuBuildChunkDictionaries<1024><<<num_chunks, 1024, 0, stream.value()>>>(chunks, dev_scratch);
  }
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
7c223a4334a27f0fd12cbecdc576971677298195.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zmcsrcompressor_gpu.cu, normal z -> d, Mon Jun 25 18:24:26 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
// copy nonzeros into new structure
// Pass 1 of the compressor: one thread per row counts the nonzero entries of
// its row and stores the count (not yet a row pointer!) in B_rowptr[row].
// The copy into B_val/B_colind is intentionally disabled here: those arrays
// are only allocated after the true nnz is known (kernel3 does the copy).
__global__ void
magma_dmcsrgpu_kernel1( int num_rows,
                        double *A_val,
                        magma_index_t *A_rowptr,
                        magma_index_t *A_colind,
                        double *B_val,
                        magma_index_t *B_rowptr,
                        magma_index_t *B_colind )
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;
    if(row<num_rows){
        double zero = MAGMA_D_ZERO;
        int start = A_rowptr[ row ];
        int new_location = start;
        int end = A_rowptr[ row+1 ];
        for( j=start; j<end; j++ ){
            if( A_val[j] != zero ){
                // B_val[new_location] = A_val[j];
                // B_colind[new_location] = A_colind[j];
                new_location++;
            }
        }
        // this is not a correct rowpointer! this is the nnz of this row!
        B_rowptr[ row ] = new_location-start;
    }
}
// generate a valid rowpointer
// Pass 2: a single thread (global index 0) serially prefix-sums the per-row
// counts in B_rowptr into a proper CSR row pointer written to A_rowptr.
// Launched with one block of BLOCK_SIZE2 (== 1) threads; O(num_rows) serial.
__global__ void
magma_dmcsrgpu_kernel2( int num_rows,
                        magma_index_t *B_rowptr,
                        magma_index_t *A_rowptr )
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    int j, nnz = 0;
    if( idx == 0 ){
        A_rowptr[ 0 ] = nnz;
        for( j=0; j<num_rows; j++ ){
            nnz+=B_rowptr[ j ];
            A_rowptr[ j+1 ] = nnz;
        }
    }
}
// copy new structure into original matrix
// Pass 3: one thread per row. Reads the row's entries through the *original*
// row pointer (B2_rowptr) and writes the nonzeros compactly at the position
// given by the corrected row pointer (A_rowptr), filling B_val/B_colind.
__global__ void
magma_dmcsrgpu_kernel3( int num_rows,
                        double *B_val,
                        magma_index_t *B_rowptr,
                        magma_index_t *B_colind,
                        magma_index_t *B2_rowptr,
                        double *A_val,
                        magma_index_t *A_rowptr,
                        magma_index_t *A_colind
                                            )
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j, new_location;
    if(row<num_rows){
        new_location = A_rowptr[ row ];  // write cursor in the compacted arrays
        int start = B2_rowptr[ row ];    // read range in the original arrays
        int end = B2_rowptr[ row+1 ];
        double zero = MAGMA_D_ZERO;
        for( j=start; j<end; j++ ){
            if( A_val[j] != zero ){
                B_val[new_location] = A_val[j];
                B_colind[new_location] = A_colind[j];
                new_location++;
            }
            // A_val[ j ] = B_val[ j ];
            // A_colind[ j ] = B_colind[ j ];
        }
    }
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param[in,out]
A magma_d_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmcsrcompressor_gpu(
    magma_d_matrix *A,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    magma_d_matrix B={Magma_CSR}, B2={Magma_CSR};
    magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR};
    magma_index_t *cputmp = NULL;
    if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
        CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 ));
        CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 ));
        // keep the original row pointer; kernel3 needs it to locate old entries
        magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue );
        dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) );
        // pass 1: count the nonzeros of each row into B.drow
        hipLaunchKernelGGL(( magma_dmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() ,
            A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
        // pass 2: serial prefix sum turns the counts into a valid row pointer in A->drow
        dim3 grid2( 1, 1, 1);
        hipLaunchKernelGGL(( magma_dmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue->cuda_stream() ,
            A->num_rows, B.drow, A->drow );
        // read the true nnz from the *device* row pointer.
        // BUGFIX: previously read A->row (the host-side pointer), which is not
        // valid for a Magma_DEV matrix; the corrected rowptr lives in A->drow.
        CHECK( magma_index_malloc_cpu( &cputmp, 1 ));
        magma_index_getvector( 1, A->drow+(A->num_rows), 1, cputmp, 1, queue );
        A->nnz = (magma_int_t) cputmp[0];
        // reallocate with the right size
        CHECK( magma_dmalloc( &B.dval, A->nnz ));
        CHECK( magma_index_malloc( &B.dcol, A->nnz ));
        // pass 3: compact values and column indices into the new arrays
        hipLaunchKernelGGL(( magma_dmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() ,
            A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
        magma_free( A->dcol );
        magma_free( A->dval );
        A->dcol = B.dcol;
        A->dval = B.dval;
    }
    else {
        magma_storage_t A_storage = A->storage_type;
        magma_location_t A_location = A->memory_location;
        // Bring the matrix into CSR form on the device, compress, convert back.
        CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue ));
        // BUGFIX: transfer the CSR copy (CSRA), not *A — transferring *A kept
        // the original storage type, so the recursive call would land in this
        // branch again instead of the fast path above.
        CHECK( magma_dmtransfer( CSRA, &dA, A->memory_location, Magma_DEV, queue ));
        magma_dmfree( &CSRA, queue );
        CHECK( magma_dmcsrcompressor_gpu( &dA, queue ));
        // BUGFIX: copy the result back *before* freeing dA; the original code
        // freed dA and then transferred from the freed matrix.
        CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue ));
        magma_dmfree( &dA, queue );
        magma_dmfree( A, queue );
        CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue ));
        magma_dmfree( &CSRA, queue );
    }
cleanup:
    magma_dmfree( &dA, queue );
    magma_dmfree( &CSRA, queue );
    magma_free( B2.drow );
    magma_free( B.drow );
    // BUGFIX: release the host-side scratch (previously leaked)
    magma_free_cpu( cputmp );
    return info;
}
| 7c223a4334a27f0fd12cbecdc576971677298195.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/magma_zmcsrcompressor_gpu.cu, normal z -> d, Mon Jun 25 18:24:26 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
// copy nonzeros into new structure
// Pass 1 of the compressor: one thread per row counts the nonzero entries of
// its row and stores the count (not yet a row pointer!) in B_rowptr[row].
// The copy into B_val/B_colind is intentionally disabled here: those arrays
// are only allocated after the true nnz is known (kernel3 does the copy).
__global__ void
magma_dmcsrgpu_kernel1( int num_rows,
                        double *A_val,
                        magma_index_t *A_rowptr,
                        magma_index_t *A_colind,
                        double *B_val,
                        magma_index_t *B_rowptr,
                        magma_index_t *B_colind )
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;
    if(row<num_rows){
        double zero = MAGMA_D_ZERO;
        int start = A_rowptr[ row ];
        int new_location = start;
        int end = A_rowptr[ row+1 ];
        for( j=start; j<end; j++ ){
            if( A_val[j] != zero ){
                // B_val[new_location] = A_val[j];
                // B_colind[new_location] = A_colind[j];
                new_location++;
            }
        }
        // this is not a correct rowpointer! this is the nnz of this row!
        B_rowptr[ row ] = new_location-start;
    }
}
// generate a valid rowpointer
// Pass 2: a single thread (global index 0) serially converts the per-row
// nonzero counts stored in B_rowptr into a proper CSR row pointer written to
// A_rowptr. Launched with one block of BLOCK_SIZE2 (== 1) threads.
__global__ void
magma_dmcsrgpu_kernel2( int num_rows,
                        magma_index_t *B_rowptr,
                        magma_index_t *A_rowptr )
{
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if( tid != 0 ){
        return;  // only one thread performs the serial scan
    }
    int running_total = 0;
    A_rowptr[ 0 ] = running_total;
    for( int r = 0; r < num_rows; ++r ){
        running_total += B_rowptr[ r ];
        A_rowptr[ r+1 ] = running_total;
    }
}
// copy new structure into original matrix
// Pass 3: one thread per row. Reads the row's entries through the *original*
// row pointer (B2_rowptr) and writes the nonzeros compactly at the position
// given by the corrected row pointer (A_rowptr), filling B_val/B_colind.
__global__ void
magma_dmcsrgpu_kernel3( int num_rows,
                        double *B_val,
                        magma_index_t *B_rowptr,
                        magma_index_t *B_colind,
                        magma_index_t *B2_rowptr,
                        double *A_val,
                        magma_index_t *A_rowptr,
                        magma_index_t *A_colind
                                            )
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j, new_location;
    if(row<num_rows){
        new_location = A_rowptr[ row ];  // write cursor in the compacted arrays
        int start = B2_rowptr[ row ];    // read range in the original arrays
        int end = B2_rowptr[ row+1 ];
        double zero = MAGMA_D_ZERO;
        for( j=start; j<end; j++ ){
            if( A_val[j] != zero ){
                B_val[new_location] = A_val[j];
                B_colind[new_location] = A_colind[j];
                new_location++;
            }
            // A_val[ j ] = B_val[ j ];
            // A_colind[ j ] = B_colind[ j ];
        }
    }
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param[in,out]
A magma_d_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmcsrcompressor_gpu(
    magma_d_matrix *A,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    magma_d_matrix B={Magma_CSR}, B2={Magma_CSR};
    magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR};
    magma_index_t *cputmp = NULL;
    if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
        CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 ));
        CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 ));
        // keep the original row pointer; kernel3 needs it to locate old entries
        magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue );
        dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) );
        // pass 1: count the nonzeros of each row into B.drow
        magma_dmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>>
            ( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
        // pass 2: serial prefix sum turns the counts into a valid row pointer in A->drow
        dim3 grid2( 1, 1, 1);
        magma_dmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue->cuda_stream() >>>
            ( A->num_rows, B.drow, A->drow );
        // read the true nnz from the *device* row pointer.
        // BUGFIX: previously read A->row (the host-side pointer), which is not
        // valid for a Magma_DEV matrix; the corrected rowptr lives in A->drow.
        CHECK( magma_index_malloc_cpu( &cputmp, 1 ));
        magma_index_getvector( 1, A->drow+(A->num_rows), 1, cputmp, 1, queue );
        A->nnz = (magma_int_t) cputmp[0];
        // reallocate with the right size
        CHECK( magma_dmalloc( &B.dval, A->nnz ));
        CHECK( magma_index_malloc( &B.dcol, A->nnz ));
        // pass 3: compact values and column indices into the new arrays
        magma_dmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>>
            ( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
        magma_free( A->dcol );
        magma_free( A->dval );
        A->dcol = B.dcol;
        A->dval = B.dval;
    }
    else {
        magma_storage_t A_storage = A->storage_type;
        magma_location_t A_location = A->memory_location;
        // Bring the matrix into CSR form on the device, compress, convert back.
        CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue ));
        // BUGFIX: transfer the CSR copy (CSRA), not *A — transferring *A kept
        // the original storage type, so the recursive call would land in this
        // branch again instead of the fast path above.
        CHECK( magma_dmtransfer( CSRA, &dA, A->memory_location, Magma_DEV, queue ));
        magma_dmfree( &CSRA, queue );
        CHECK( magma_dmcsrcompressor_gpu( &dA, queue ));
        // BUGFIX: copy the result back *before* freeing dA; the original code
        // freed dA and then transferred from the freed matrix.
        CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue ));
        magma_dmfree( &dA, queue );
        magma_dmfree( A, queue );
        CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue ));
        magma_dmfree( &CSRA, queue );
    }
cleanup:
    magma_dmfree( &dA, queue );
    magma_dmfree( &CSRA, queue );
    magma_free( B2.drow );
    magma_free( B.drow );
    // BUGFIX: release the host-side scratch (previously leaked)
    magma_free_cpu( cputmp );
    return info;
}
|
a1409c7e483a5e6c6930161acf6925f800db1efe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif
#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
#include "paddle/fluid/framework/data_feed.h"
#include <thrust/device_ptr.h>
#include <thrust/random.h>
#include <thrust/shuffle.h>
#include <sstream>
#include "hipcub/hipcub.hpp"
#if defined(PADDLE_WITH_PSCORE) && defined(PADDLE_WITH_GPU_GRAPH)
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_node.h"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
#endif
#include "paddle/fluid/framework/fleet/heter_ps/hashtable.h"
#include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
#include "paddle/fluid/framework/io/fs.h"
#include "paddle/phi/kernels/gpu/graph_reindex_funcs.h"
#include "paddle/phi/kernels/graph_reindex_kernel.h"
DECLARE_bool(enable_opt_get_features);
DECLARE_bool(graph_metapath_split_opt);
DECLARE_int32(gpugraph_storage_mode);
DECLARE_double(gpugraph_hbm_table_load_factor);
namespace paddle {
namespace framework {
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
#define DEBUG_STATE(state) \
VLOG(2) << "left: " << state->left << " right: " << state->right \
<< " central_word: " << state->central_word \
<< " step: " << state->step << " cursor: " << state->cursor \
<< " len: " << state->len << " row_num: " << state->row_num; \
// CUDA: use 512 threads per block
const int CUDA_NUM_THREADS = 512;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
/**
 * @brief Write each element's own position into idx (idx[i] = i).
 * One thread per element; threads past len return immediately.
 */
template <typename T>
__global__ void fill_idx(T *idx, size_t len) {
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) {
    return;
  }
  idx[tid] = tid;
}
/**
 * @brief sort cub
 *
 * Radix-sorts (key, value) pairs ascending over the full key width
 * (bits [0, 8*sizeof(K))) with hipcub::DeviceRadixSort on `stream`.
 * The shared scratch buffer d_buf_ is grown on demand and reused across
 * calls.
 */
template <typename K, typename V>
void cub_sort_pairs(int len,
                    const K *in_keys,
                    K *out_keys,
                    const V *in_vals,
                    V *out_vals,
                    hipStream_t stream,
                    std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                    const paddle::platform::Place &place_) {
  size_t temp_storage_bytes = 0;
  // Size-query pass: a NULL workspace makes SortPairs only report
  // temp_storage_bytes.
  CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(NULL,
                                                temp_storage_bytes,
                                                in_keys,
                                                out_keys,
                                                in_vals,
                                                out_vals,
                                                len,
                                                0,
                                                8 * sizeof(K),
                                                stream,
                                                false));
  // (Re)allocate scratch only when missing or too small.
  if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
    d_buf_ = memory::AllocShared(
        place_,
        temp_storage_bytes,
        phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  }
  // Actual sort pass.
  CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(d_buf_->ptr(),
                                                temp_storage_bytes,
                                                in_keys,
                                                out_keys,
                                                in_vals,
                                                out_vals,
                                                len,
                                                0,
                                                8 * sizeof(K),
                                                stream,
                                                false));
}
/**
 * @Brief cub run length encode
 *
 * Run-length-encodes N keys with hipcub::DeviceRunLengthEncode: unique keys
 * go to out_keys, their run lengths to out_sizes, and the number of runs is
 * written (on device) to d_out_len. d_buf_ is a reusable scratch buffer.
 */
template <typename K, typename V, typename TNum>
void cub_runlength_encode(int N,
                          const K *in_keys,
                          K *out_keys,
                          V *out_sizes,
                          TNum *d_out_len,
                          hipStream_t stream,
                          std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                          const paddle::platform::Place &place_) {
  size_t temp_storage_bytes = 0;
  // Size-query pass (NULL workspace).
  CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode(NULL,
                                                   temp_storage_bytes,
                                                   in_keys,
                                                   out_keys,
                                                   out_sizes,
                                                   d_out_len,
                                                   N,
                                                   stream));
  // Grow the shared scratch buffer only when needed.
  if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
    d_buf_ = memory::AllocShared(
        place_,
        temp_storage_bytes,
        phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  }
  // Actual encode pass.
  CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode(d_buf_->ptr(),
                                                   temp_storage_bytes,
                                                   in_keys,
                                                   out_keys,
                                                   out_sizes,
                                                   d_out_len,
                                                   N,
                                                   stream));
}
/**
 * @brief exclusive sum
 *
 * Exclusive prefix sum of N elements via hipcub::DeviceScan on `stream`.
 * d_buf_ is a reusable scratch buffer, grown on demand.
 */
template <typename K>
void cub_exclusivesum(int N,
                      const K *in,
                      K *out,
                      hipStream_t stream,
                      std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                      const paddle::platform::Place &place_) {
  size_t temp_storage_bytes = 0;
  // Size-query pass (NULL workspace).
  CUDA_CHECK(hipcub::DeviceScan::ExclusiveSum(
      NULL, temp_storage_bytes, in, out, N, stream));
  if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
    d_buf_ = memory::AllocShared(
        place_,
        temp_storage_bytes,
        phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  }
  // Actual scan pass.
  CUDA_CHECK(hipcub::DeviceScan::ExclusiveSum(
      d_buf_->ptr(), temp_storage_bytes, in, out, N, stream));
}
// Scatter the inverse index: for each unique key i (N of them), walk its run
// [d_offset[i], d_offset[i] + d_merged_cnts[i]) in the sorted order and mark
// every original position (recovered through d_sorted_idx) with i.
template <typename T>
__global__ void kernel_fill_restore_idx(size_t N,
                                        const T *d_sorted_idx,
                                        const T *d_offset,
                                        const T *d_merged_cnts,
                                        T *d_restore_idx) {
  CUDA_KERNEL_LOOP(i, N) {
    const T begin = d_offset[i];
    const T count = d_merged_cnts[i];
    for (size_t k = 0; k < count; ++k) {
      d_restore_idx[d_sorted_idx[begin + k]] = i;
    }
  }
}
// Inverse-index fill via binary search: for each position i in the sorted
// order, find which unique key's run contains i (d_offset is the exclusive
// prefix sum of unique-key counts, merge_num entries) and record that key
// index for the original element d_sorted_idx[i]. Chosen over
// kernel_fill_restore_idx when the dedup rate is high (long runs).
template <typename T>
__global__ void kernel_fill_restore_idx_by_search(size_t N,
                                                  const T *d_sorted_idx,
                                                  size_t merge_num,
                                                  const T *d_offset,
                                                  T *d_restore_idx) {
  CUDA_KERNEL_LOOP(i, N) {
    // Fast path: everything before d_offset[1] belongs to unique key 0.
    if (i < d_offset[1]) {
      d_restore_idx[d_sorted_idx[i]] = 0;
      continue;
    }
    // Binary search for the smallest index `low` with i < d_offset[low + 1].
    int high = merge_num - 1;
    int low = 1;
    while (low < high) {
      int mid = (low + high) / 2;
      if (i < d_offset[mid + 1]) {
        high = mid;
      } else {
        low = mid + 1;
      }
    }
    d_restore_idx[d_sorted_idx[i]] = low;
  }
}
// For unique node and inverse id.
// Deduplicates total_nodes_num keys on `stream`:
//   d_sorted_keys <- d_keys sorted ascending
//   d_sorted_idx  <- original position of each sorted key
//   d_merged_keys <- unique keys (run-length encoded)
//   d_merged_cnts <- occurrence count per unique key
//   d_offset      <- exclusive prefix sum of d_merged_cnts
//   d_restore_idx <- for every original key, the index of its unique key
// Returns the number of unique keys. d_buf_ is reusable CUB scratch space.
int dedup_keys_and_fillidx(int total_nodes_num,
                           const uint64_t *d_keys,
                           uint64_t *d_merged_keys,  // input
                           uint64_t *d_sorted_keys,  // output
                           uint32_t *d_restore_idx,  // inverse
                           uint32_t *d_sorted_idx,
                           uint32_t *d_offset,
                           uint32_t *d_merged_cnts,
                           hipStream_t stream,
                           std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                           const paddle::platform::Place &place_) {
  int merged_size = 0;  // Final num
  // One extra uint32_t slot is used to hold the device-side unique count.
  auto d_index_in =
      memory::Alloc(place_,
                    sizeof(uint32_t) * (total_nodes_num + 1),
                    phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  uint32_t *d_index_in_ptr = reinterpret_cast<uint32_t *>(d_index_in->ptr());
  int *d_merged_size =
      reinterpret_cast<int *>(&d_index_in_ptr[total_nodes_num]);
  // d_index_in = [0, 1, 2, ...] so the sort yields the permutation.
  hipLaunchKernelGGL(( fill_idx), dim3(GET_BLOCKS(total_nodes_num)), dim3(CUDA_NUM_THREADS), 0, stream,
      d_index_in_ptr, total_nodes_num);
  cub_sort_pairs(total_nodes_num,
                 d_keys,
                 d_sorted_keys,
                 d_index_in_ptr,
                 d_sorted_idx,
                 stream,
                 d_buf_,
                 place_);
  cub_runlength_encode(total_nodes_num,
                       d_sorted_keys,
                       d_merged_keys,
                       d_merged_cnts,
                       d_merged_size,
                       stream,
                       d_buf_,
                       place_);
  // Bring the unique count to the host; must sync before using it below.
  CUDA_CHECK(hipMemcpyAsync(&merged_size,
                            d_merged_size,
                            sizeof(int),
                            hipMemcpyDeviceToHost,
                            stream));
  CUDA_CHECK(hipStreamSynchronize(stream));
  cub_exclusivesum(
      merged_size, d_merged_cnts, d_offset, stream, d_buf_, place_);
  // Pick the cheaper inverse-fill strategy based on the dedup rate.
  if (total_nodes_num < merged_size * 2) {
    hipLaunchKernelGGL(( kernel_fill_restore_idx), dim3(GET_BLOCKS(merged_size)),
                       dim3(CUDA_NUM_THREADS),
                       0,
                       stream,
        merged_size, d_sorted_idx, d_offset, d_merged_cnts, d_restore_idx);
  } else {
    // used mid search fill idx when high dedup rate
    hipLaunchKernelGGL(( kernel_fill_restore_idx_by_search), dim3(GET_BLOCKS(total_nodes_num)),
                       dim3(CUDA_NUM_THREADS),
                       0,
                       stream,
        total_nodes_num, d_sorted_idx, merged_size, d_offset, d_restore_idx);
  }
  CUDA_CHECK(hipStreamSynchronize(stream));
  return merged_size;
}
// fill slot values
// One thread per used slot: builds that slot's row of slot_value_offsets,
// an (ins_num + 1)-wide inclusive running sum of per-instance value counts
// taken from either the uint64 or the float offset matrix, depending on the
// slot's value type.
__global__ void FillSlotValueOffsetKernel(const int ins_num,
                                          const int used_slot_num,
                                          size_t *slot_value_offsets,
                                          const int *uint64_offsets,
                                          const int uint64_slot_size,
                                          const int *float_offsets,
                                          const int float_slot_size,
                                          const UsedSlotGpuType *used_slots) {
  int col_num = ins_num + 1;
  int uint64_cols = uint64_slot_size + 1;
  int float_cols = float_slot_size + 1;
  CUDA_KERNEL_LOOP(slot_idx, used_slot_num) {
    int value_off = slot_idx * col_num;
    slot_value_offsets[value_off] = 0;
    auto &info = used_slots[slot_idx];
    if (info.is_uint64_value) {
      for (int k = 0; k < ins_num; ++k) {
        // Value count of instance k for this slot = difference of adjacent
        // offsets in the uint64 offset matrix.
        int pos = k * uint64_cols + info.slot_value_idx;
        int num = uint64_offsets[pos + 1] - uint64_offsets[pos];
        PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
        slot_value_offsets[value_off + k + 1] =
            slot_value_offsets[value_off + k] + num;
      }
    } else {
      for (int k = 0; k < ins_num; ++k) {
        int pos = k * float_cols + info.slot_value_idx;
        int num = float_offsets[pos + 1] - float_offsets[pos];
        PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
        slot_value_offsets[value_off + k + 1] =
            slot_value_offsets[value_off + k] + num;
      }
    }
  }
}
// Host wrapper: launch FillSlotValueOffsetKernel (one thread per used slot)
// on `stream` and block until the offsets are computed.
void SlotRecordInMemoryDataFeed::FillSlotValueOffset(
    const int ins_num,
    const int used_slot_num,
    size_t *slot_value_offsets,
    const int *uint64_offsets,
    const int uint64_slot_size,
    const int *float_offsets,
    const int float_slot_size,
    const UsedSlotGpuType *used_slots,
    hipStream_t stream) {
  hipLaunchKernelGGL(( FillSlotValueOffsetKernel), dim3(GET_BLOCKS(used_slot_num)),
                     dim3(CUDA_NUM_THREADS),
                     0,
                     stream, ins_num,
      used_slot_num,
      slot_value_offsets,
      uint64_offsets,
      uint64_slot_size,
      float_offsets,
      float_slot_size,
      used_slots);
  // Callers read slot_value_offsets right after this returns, so sync here.
  hipStreamSynchronize(stream);
}
// One thread per (slot, instance) pair: copy that instance's variable-length
// values for the slot from the packed uint64/float feature buffers into the
// slot's destination tensor (dest[slot_idx]), at the offset previously
// computed by FillSlotValueOffsetKernel.
__global__ void CopyForTensorKernel(const int used_slot_num,
                                    const int ins_num,
                                    void **dest,
                                    const size_t *slot_value_offsets,
                                    const uint64_t *uint64_feas,
                                    const int *uint64_offsets,
                                    const int *uint64_ins_lens,
                                    const int uint64_slot_size,
                                    const float *float_feas,
                                    const int *float_offsets,
                                    const int *float_ins_lens,
                                    const int float_slot_size,
                                    const UsedSlotGpuType *used_slots) {
  int col_num = ins_num + 1;
  int uint64_cols = uint64_slot_size + 1;
  int float_cols = float_slot_size + 1;
  CUDA_KERNEL_LOOP(i, ins_num * used_slot_num) {
    // Decompose the flat index into (slot, instance).
    int slot_idx = i / ins_num;
    int ins_idx = i % ins_num;
    uint32_t value_offset = slot_value_offsets[slot_idx * col_num + ins_idx];
    auto &info = used_slots[slot_idx];
    if (info.is_uint64_value) {
      uint64_t *up = reinterpret_cast<uint64_t *>(dest[slot_idx]);
      int index = info.slot_value_idx + uint64_cols * ins_idx;
      int old_off = uint64_offsets[index];
      int num = uint64_offsets[index + 1] - old_off;
      PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
      // uint64_ins_lens gives the instance's base offset in uint64_feas.
      int uint64_value_offset = uint64_ins_lens[ins_idx];
      for (int k = 0; k < num; ++k) {
        up[k + value_offset] = uint64_feas[k + old_off + uint64_value_offset];
      }
    } else {
      float *fp = reinterpret_cast<float *>(dest[slot_idx]);
      int index = info.slot_value_idx + float_cols * ins_idx;
      int old_off = float_offsets[index];
      int num = float_offsets[index + 1] - old_off;
      PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
      int float_value_offset = float_ins_lens[ins_idx];
      for (int k = 0; k < num; ++k) {
        fp[k + value_offset] = float_feas[k + old_off + float_value_offset];
      }
    }
  }
}
// Host wrapper: launch CopyForTensorKernel (one thread per slot-instance
// pair) on `stream` and block until the slot tensors are filled.
void SlotRecordInMemoryDataFeed::CopyForTensor(
    const int ins_num,
    const int used_slot_num,
    void **dest,
    const size_t *slot_value_offsets,
    const uint64_t *uint64_feas,
    const int *uint64_offsets,
    const int *uint64_ins_lens,
    const int uint64_slot_size,
    const float *float_feas,
    const int *float_offsets,
    const int *float_ins_lens,
    const int float_slot_size,
    const UsedSlotGpuType *used_slots,
    hipStream_t stream) {
  hipLaunchKernelGGL(( CopyForTensorKernel), dim3(GET_BLOCKS(used_slot_num * ins_num)),
                     dim3(CUDA_NUM_THREADS),
                     0,
                     stream, used_slot_num,
      ins_num,
      dest,
      slot_value_offsets,
      uint64_feas,
      uint64_offsets,
      uint64_ins_lens,
      uint64_slot_size,
      float_feas,
      float_offsets,
      float_ins_lens,
      float_slot_size,
      used_slots);
  // Destination tensors are consumed right after this call, so sync here.
  hipStreamSynchronize(stream);
}
// Fill the first `len` elements of `tensor` with the constant 1 (used for
// the show/click columns, which are always 1 in graph mode).
__global__ void GraphFillCVMKernel(int64_t *tensor, int len) {
  CUDA_KERNEL_LOOP(pos, len) {
    tensor[pos] = 1;
  }
}
// Duplicate each source key into two adjacent destination slots, turning
// every input id into a (key, key) pair in dist_tensor.
__global__ void CopyDuplicateKeys(int64_t *dist_tensor,
                                  uint64_t *src_tensor,
                                  int len) {
  CUDA_KERNEL_LOOP(i, len) {
    const int64_t key = src_tensor[i];
    const int base = i * 2;
    dist_tensor[base] = key;
    dist_tensor[base + 1] = key;
  }
}
#if defined(PADDLE_WITH_PSCORE) && defined(PADDLE_WITH_GPU_GRAPH)
// Advance the walk-buffer state machine to the next readable span: first try
// the next window step, then the next central word, then the next batch.
// Returns the span length on success, 0 when the buffer is exhausted.
int GraphDataGenerator::AcquireInstance(BufState *state) {
  if (state->GetNextStep()) {
    DEBUG_STATE(state);
    return state->len;
  } else if (state->GetNextCentrolWord()) {
    DEBUG_STATE(state);
    return state->len;
  } else if (state->GetNextBatch()) {
    DEBUG_STATE(state);
    return state->len;
  }
  return 0;
}
// Emit one (src, dst) training pair per walk row: src is the central-word
// column of the row, dst sits `step` columns away. Zero entries are walk
// padding and produce no pair; pairs whose node types match an entry of
// excluded_train_pair are filtered. Surviving pairs are compacted per block
// in shared memory, then flushed to id_tensor at a base offset reserved with
// one atomicAdd on fill_ins_num per block.
__global__ void GraphFillIdKernel(uint64_t *id_tensor,
                                  int *fill_ins_num,
                                  uint64_t *walk,
                                  uint8_t *walk_ntype,
                                  int *row,
                                  int central_word,
                                  int step,
                                  int len,
                                  int col_num,
                                  uint8_t *excluded_train_pair,
                                  int excluded_train_pair_len) {
  // Each thread contributes at most one pair (2 ids), so a block never
  // exceeds CUDA_NUM_THREADS pairs.
  __shared__ uint64_t local_key[CUDA_NUM_THREADS * 2];
  __shared__ int local_num;
  __shared__ int global_num;
  bool need_filter = false;
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (threadIdx.x == 0) {
    local_num = 0;
  }
  __syncthreads();
  // int dst = idx * 2;
  // id_tensor[dst] = walk[src];
  // id_tensor[dst + 1] = walk[src + step];
  if (idx < len) {
    int src = row[idx] * col_num + central_word;
    if (walk[src] != 0 && walk[src + step] != 0) {
      // excluded_train_pair holds (src_type, dst_type) byte pairs; when the
      // list is empty this loop is skipped (walk_ntype may be NULL then).
      for (int i = 0; i < excluded_train_pair_len; i += 2) {
        if (walk_ntype[src] == excluded_train_pair[i] &&
            walk_ntype[src + step] == excluded_train_pair[i + 1]) {
          // filter this pair
          need_filter = true;
          break;
        }
      }
      if (!need_filter) {
        // Reserve a slot in the block-local compaction buffer.
        size_t dst = atomicAdd(&local_num, 1);
        local_key[dst * 2] = walk[src];
        local_key[dst * 2 + 1] = walk[src + step];
      }
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    // Reserve this block's range in the global output.
    global_num = atomicAdd(fill_ins_num, local_num);
  }
  __syncthreads();
  // local_num <= blockDim.x, so thread t copies pair t of the local buffer.
  if (threadIdx.x < local_num) {
    id_tensor[global_num * 2 + 2 * threadIdx.x] = local_key[2 * threadIdx.x];
    id_tensor[global_num * 2 + 2 * threadIdx.x + 1] =
        local_key[2 * threadIdx.x + 1];
  }
}
// Scatter node features into per-slot tensors. idx enumerates
// (fea_idx, ins_idx) pairs over `len` = fea_num_per_node * total_ins;
// id_tensor holds one device pointer per slot (hence the double cast), and
// actual_slot_id_map / fea_offset_map translate a flat feature index into
// its slot and the feature's position within that slot.
__global__ void GraphFillSlotKernel(uint64_t *id_tensor,
                                    uint64_t *feature_buf,
                                    int len,
                                    int total_ins,
                                    int slot_num,
                                    int *slot_feature_num_map,
                                    int fea_num_per_node,
                                    int *actual_slot_id_map,
                                    int *fea_offset_map) {
  CUDA_KERNEL_LOOP(idx, len) {
    int fea_idx = idx / total_ins;
    int ins_idx = idx % total_ins;
    int actual_slot_id = actual_slot_id_map[fea_idx];
    int fea_offset = fea_offset_map[fea_idx];
    reinterpret_cast<uint64_t *>(id_tensor[actual_slot_id])
        [ins_idx * slot_feature_num_map[actual_slot_id] + fea_offset] =
            feature_buf[ins_idx * fea_num_per_node + fea_idx];
  }
}
// Build each slot's lod column: entry `ins` of slot `slot` is
// ins * slot_feature_num_map[slot]. id_tensor holds one device pointer per
// slot; idx enumerates (slot, instance) pairs over len = slots * total_ins.
__global__ void GraphFillSlotLodKernelOpt(uint64_t *id_tensor,
                                          int len,
                                          int total_ins,
                                          int *slot_feature_num_map) {
  CUDA_KERNEL_LOOP(flat, len) {
    const int slot = flat / total_ins;
    const int ins = flat % total_ins;
    uint64_t *lod = reinterpret_cast<uint64_t *>(id_tensor[slot]);
    lod[ins] = ins * slot_feature_num_map[slot];
  }
}
// Identity lod fill: id_tensor[i] = i for i in [0, len).
__global__ void GraphFillSlotLodKernel(int64_t *id_tensor, int len) {
  CUDA_KERNEL_LOOP(pos, len) {
    id_tensor[pos] = pos;
  }
}
// fill sage neighbor results
// Compact the fixed-stride sample buffer (sample_size slots per key, only
// actual_sample_size[i] valid) into a dense layout at
// cumsum_actual_sample_size[i], recording for each copied neighbor the id of
// its destination node (i % mod).
__global__ void FillActualNeighbors(int64_t *vals,
                                    int64_t *actual_vals,
                                    int64_t *actual_vals_dst,
                                    int *actual_sample_size,
                                    int *cumsum_actual_sample_size,
                                    int sample_size,
                                    int len,
                                    int mod) {
  const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < len) {
    int offset1 = cumsum_actual_sample_size[i];
    int offset2 = sample_size * i;
    int dst_id = i % mod;
    for (int j = 0; j < actual_sample_size[i]; j++) {
      actual_vals[offset1 + j] = vals[offset2 + j];
      actual_vals_dst[offset1 + j] = dst_id;
    }
  }
}
// Fill feed slots 0..2 (id, show, clk) for a non-sage batch on train_stream_.
// Training copies the last total_instance ids from the ins pair buffer;
// inference takes total_instance/2 keys of node-type `cursor` and duplicates
// each into a (src, dst) pair. show/clk are filled with constant 1.
// Always returns 0.
int GraphDataGenerator::FillIdShowClkTensor(int total_instance,
                                            bool gpu_graph_training,
                                            size_t cursor) {
  id_tensor_ptr_ =
      feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
  show_tensor_ptr_ =
      feed_vec_[1]->mutable_data<int64_t>({total_instance}, this->place_);
  clk_tensor_ptr_ =
      feed_vec_[2]->mutable_data<int64_t>({total_instance}, this->place_);
  if (gpu_graph_training) {
    uint64_t *ins_cursor, *ins_buf;
    ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
    // The newest total_instance ids sit at the tail of the pair buffer.
    ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
    hipMemcpyAsync(id_tensor_ptr_,
                   ins_cursor,
                   sizeof(uint64_t) * total_instance,
                   hipMemcpyDeviceToDevice,
                   train_stream_);
  } else {
    uint64_t *d_type_keys =
        reinterpret_cast<uint64_t *>(d_device_keys_[cursor]->ptr());
    d_type_keys += infer_node_start_;
    // Advance the inference cursor; each key yields two tensor rows.
    infer_node_start_ += total_instance / 2;
    hipLaunchKernelGGL(( CopyDuplicateKeys), dim3(GET_BLOCKS(total_instance / 2)),
                       dim3(CUDA_NUM_THREADS),
                       0,
                       train_stream_,
        id_tensor_ptr_, d_type_keys, total_instance / 2);
  }
  hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(total_instance)),
                     dim3(CUDA_NUM_THREADS),
                     0,
                     train_stream_, show_tensor_ptr_, total_instance);
  hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(total_instance)),
                     dim3(CUDA_NUM_THREADS),
                     0,
                     train_stream_, clk_tensor_ptr_, total_instance);
  return 0;
}
// Fill the feed tensors for one sage-mode batch `index`: unique node ids,
// show/clk (constant 1), the inverse index mapping, optional node degrees,
// and per-sample-layer graph tensors (num_nodes, next_num_nodes, edge src,
// edge dst, edge split counts). All copies are issued on train_stream_ and
// are synchronized later by the caller (GenerateBatch). Always returns 0.
int GraphDataGenerator::FillGraphIdShowClkTensor(int uniq_instance,
                                                 int total_instance,
                                                 int index) {
  id_tensor_ptr_ =
      feed_vec_[0]->mutable_data<int64_t>({uniq_instance, 1}, this->place_);
  show_tensor_ptr_ =
      feed_vec_[1]->mutable_data<int64_t>({uniq_instance}, this->place_);
  clk_tensor_ptr_ =
      feed_vec_[2]->mutable_data<int64_t>({uniq_instance}, this->place_);
  // Feed layout: 3 id/show/clk + 2 per slot + 5 per sample layer, then the
  // inverse-index tensor (and the degree tensor right after it).
  int index_offset = 3 + slot_num_ * 2 + 5 * samples_.size();
  index_tensor_ptr_ = feed_vec_[index_offset]->mutable_data<int>(
      {total_instance}, this->place_);
  if (get_degree_) {
    degree_tensor_ptr_ = feed_vec_[index_offset + 1]->mutable_data<int>(
        {uniq_instance * edge_to_id_len_}, this->place_);
  }
  int len_samples = samples_.size();
  // NOTE(review): variable-length arrays are a compiler extension, not
  // standard C++.
  int *num_nodes_tensor_ptr_[len_samples];
  int *next_num_nodes_tensor_ptr_[len_samples];
  int64_t *edges_src_tensor_ptr_[len_samples];
  int64_t *edges_dst_tensor_ptr_[len_samples];
  int *edges_split_tensor_ptr_[len_samples];
  std::vector<std::vector<int>> edges_split_num_for_graph =
      edges_split_num_vec_[index];
  std::vector<std::shared_ptr<phi::Allocation>> graph_edges =
      graph_edges_vec_[index];
  for (int i = 0; i < len_samples; i++) {
    int offset = 3 + 2 * slot_num_ + 5 * i;
    std::vector<int> edges_split_num = edges_split_num_for_graph[i];
    int neighbor_len = edges_split_num[edge_to_id_len_ + 2];
    num_nodes_tensor_ptr_[i] =
        feed_vec_[offset]->mutable_data<int>({1}, this->place_);
    next_num_nodes_tensor_ptr_[i] =
        feed_vec_[offset + 1]->mutable_data<int>({1}, this->place_);
    edges_src_tensor_ptr_[i] = feed_vec_[offset + 2]->mutable_data<int64_t>(
        {neighbor_len, 1}, this->place_);
    edges_dst_tensor_ptr_[i] = feed_vec_[offset + 3]->mutable_data<int64_t>(
        {neighbor_len, 1}, this->place_);
    edges_split_tensor_ptr_[i] = feed_vec_[offset + 4]->mutable_data<int>(
        {edge_to_id_len_}, this->place_);
    // [edges_split_num, next_num_nodes, num_nodes, neighbor_len]
    hipMemcpyAsync(next_num_nodes_tensor_ptr_[i],
                   edges_split_num.data() + edge_to_id_len_,
                   sizeof(int),
                   hipMemcpyHostToDevice,
                   train_stream_);
    hipMemcpyAsync(num_nodes_tensor_ptr_[i],
                   edges_split_num.data() + edge_to_id_len_ + 1,
                   sizeof(int),
                   hipMemcpyHostToDevice,
                   train_stream_);
    hipMemcpyAsync(edges_split_tensor_ptr_[i],
                   edges_split_num.data(),
                   sizeof(int) * edge_to_id_len_,
                   hipMemcpyHostToDevice,
                   train_stream_);
    // graph_edges stores (src, dst) allocations interleaved per layer.
    hipMemcpyAsync(edges_src_tensor_ptr_[i],
                   graph_edges[i * 2]->ptr(),
                   sizeof(int64_t) * neighbor_len,
                   hipMemcpyDeviceToDevice,
                   train_stream_);
    hipMemcpyAsync(edges_dst_tensor_ptr_[i],
                   graph_edges[i * 2 + 1]->ptr(),
                   sizeof(int64_t) * neighbor_len,
                   hipMemcpyDeviceToDevice,
                   train_stream_);
  }
  hipMemcpyAsync(id_tensor_ptr_,
                 final_sage_nodes_vec_[index]->ptr(),
                 sizeof(int64_t) * uniq_instance,
                 hipMemcpyDeviceToDevice,
                 train_stream_);
  hipMemcpyAsync(index_tensor_ptr_,
                 inverse_vec_[index]->ptr(),
                 sizeof(int) * total_instance,
                 hipMemcpyDeviceToDevice,
                 train_stream_);
  if (get_degree_) {
    hipMemcpyAsync(degree_tensor_ptr_,
                   node_degree_vec_[index]->ptr(),
                   sizeof(int) * uniq_instance * edge_to_id_len_,
                   hipMemcpyDeviceToDevice,
                   train_stream_);
  }
  hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(uniq_instance)),
                     dim3(CUDA_NUM_THREADS),
                     0,
                     train_stream_, show_tensor_ptr_, uniq_instance);
  hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(uniq_instance)),
                     dim3(CUDA_NUM_THREADS),
                     0,
                     train_stream_, clk_tensor_ptr_, uniq_instance);
  return 0;
}
// Fill the batch's slot-feature tensors. The node-id source depends on the
// mode: training reads the tail of the ins pair buffer, inference reads back
// the already-filled id tensor, and sage mode overrides both with the
// de-duplicated sage node list. Returns FillSlotFeature's result.
int GraphDataGenerator::FillGraphSlotFeature(
    int total_instance,
    bool gpu_graph_training,
    std::shared_ptr<phi::Allocation> final_sage_nodes) {
  uint64_t *node_ids = nullptr;
  if (gpu_graph_training) {
    uint64_t *pair_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
    node_ids = pair_buf + ins_buf_pair_len_ * 2 - total_instance;
  } else {
    id_tensor_ptr_ =
        feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
    node_ids = reinterpret_cast<uint64_t *>(id_tensor_ptr_);
  }
  if (sage_mode_) {
    uint64_t *sage_nodes_ptr =
        reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
    return FillSlotFeature(sage_nodes_ptr, total_instance);
  }
  return FillSlotFeature(node_ids, total_instance);
}
// Generate (src, dst) training pairs from the current walk-buffer window and
// append them to d_ins_buf_. Returns the updated pair count
// ins_buf_pair_len_ (synchronously: the pair count is copied back and the
// stream is synced).
int GraphDataGenerator::MakeInsPair(hipStream_t stream) {
  uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
  uint8_t *walk_ntype = NULL;
  uint8_t *excluded_train_pair = NULL;
  // Node-type info is only materialized when pair filtering is configured.
  if (excluded_train_pair_len_ > 0) {
    walk_ntype = reinterpret_cast<uint8_t *>(d_walk_ntype_->ptr());
    excluded_train_pair =
        reinterpret_cast<uint8_t *>(d_excluded_train_pair_->ptr());
  }
  uint64_t *ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
  int *random_row = reinterpret_cast<int *>(d_random_row_->ptr());
  int *d_pair_num = reinterpret_cast<int *>(d_pair_num_->ptr());
  // GraphFillIdKernel accumulates the produced pair count into d_pair_num.
  hipMemsetAsync(d_pair_num, 0, sizeof(int), stream);
  int len = buf_state_.len;
  // make pair
  hipLaunchKernelGGL(( GraphFillIdKernel), dim3(GET_BLOCKS(len)), dim3(CUDA_NUM_THREADS), 0, stream,
      ins_buf + ins_buf_pair_len_ * 2,
      d_pair_num,
      walk,
      walk_ntype,
      random_row + buf_state_.cursor,
      buf_state_.central_word,
      window_step_[buf_state_.step],
      len,
      walk_len_,
      excluded_train_pair,
      excluded_train_pair_len_);
  int h_pair_num;
  hipMemcpyAsync(
      &h_pair_num, d_pair_num, sizeof(int), hipMemcpyDeviceToHost, stream);
  hipStreamSynchronize(stream);
  ins_buf_pair_len_ += h_pair_num;
  if (debug_mode_) {
    // NOTE(review): variable-length array (compiler extension).
    uint64_t h_ins_buf[ins_buf_pair_len_ * 2];  // NOLINT
    hipMemcpy(h_ins_buf,
              ins_buf,
              2 * ins_buf_pair_len_ * sizeof(uint64_t),
              hipMemcpyDeviceToHost);
    VLOG(2) << "h_pair_num = " << h_pair_num
            << ", ins_buf_pair_len = " << ins_buf_pair_len_;
    for (int xx = 0; xx < ins_buf_pair_len_; xx++) {
      VLOG(2) << "h_ins_buf: " << h_ins_buf[xx * 2] << ", "
              << h_ins_buf[xx * 2 + 1];
    }
  }
  return ins_buf_pair_len_;
}
// Top up the instance-pair buffer. Returns batch_size_ when the buffer is
// already full, -1 when no more walk data can be acquired, otherwise the
// pair-buffer length after generating new pairs.
int GraphDataGenerator::FillInsBuf(hipStream_t stream) {
  if (ins_buf_pair_len_ >= batch_size_) {
    return batch_size_;
  }
  const int acquired = AcquireInstance(&buf_state_);
  VLOG(2) << "total_ins: " << acquired;
  buf_state_.Debug();
  return (acquired == 0) ? -1 : MakeInsPair(stream);
}
// Produce one minibatch into feed_vec_. Handles the four mode combinations
// (train/infer x plain/sage), fills slot features when slots are configured,
// sets the lod on the id and slot tensors, and synchronizes train_stream_.
// Returns 1 when a batch was produced, 0 when the data is exhausted.
int GraphDataGenerator::GenerateBatch() {
  int total_instance = 0;
  platform::CUDADeviceGuard guard(gpuid_);
  int res = 0;
  if (!gpu_graph_training_) {
    // infer
    if (!sage_mode_) {
      // Clamp the last batch to the remaining infer nodes; each node
      // becomes a duplicated (src, dst) pair, hence *2.
      total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
                           ? batch_size_
                           : infer_node_end_ - infer_node_start_;
      VLOG(1) << "in graph_data generator:batch_size = " << batch_size_
              << " instance = " << total_instance;
      total_instance *= 2;
      if (total_instance == 0) {
        return 0;
      }
      FillIdShowClkTensor(total_instance, gpu_graph_training_, cursor_);
    } else {
      if (sage_batch_count_ == sage_batch_num_) {
        return 0;
      }
      FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
                               total_instance_vec_[sage_batch_count_],
                               sage_batch_count_);
    }
  } else {
    // train
    if (!sage_mode_) {
      // Refill the pair buffer until a full batch is available or the walk
      // data runs out (-1 with a non-empty buffer -> emit a short batch).
      while (ins_buf_pair_len_ < batch_size_) {
        res = FillInsBuf(train_stream_);
        if (res == -1) {
          if (ins_buf_pair_len_ == 0) {
            return 0;
          } else {
            break;
          }
        }
      }
      total_instance =
          ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
      total_instance *= 2;
      VLOG(2) << "total_instance: " << total_instance
              << ", ins_buf_pair_len = " << ins_buf_pair_len_;
      FillIdShowClkTensor(total_instance, gpu_graph_training_);
    } else {
      if (sage_batch_count_ == sage_batch_num_) {
        return 0;
      }
      FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
                               total_instance_vec_[sage_batch_count_],
                               sage_batch_count_);
    }
  }
  if (slot_num_ > 0) {
    if (!sage_mode_) {
      FillGraphSlotFeature(total_instance, gpu_graph_training_);
    } else {
      FillGraphSlotFeature(uniq_instance_vec_[sage_batch_count_],
                           gpu_graph_training_,
                           final_sage_nodes_vec_[sage_batch_count_]);
    }
  }
  offset_.clear();
  offset_.push_back(0);
  if (!sage_mode_) {
    offset_.push_back(total_instance);
  } else {
    offset_.push_back(uniq_instance_vec_[sage_batch_count_]);
    sage_batch_count_ += 1;
  }
  LoD lod{offset_};
  feed_vec_[0]->set_lod(lod);
  if (slot_num_ > 0) {
    for (int i = 0; i < slot_num_; ++i) {
      feed_vec_[3 + 2 * i]->set_lod(lod);
    }
  }
  hipStreamSynchronize(train_stream_);
  if (!gpu_graph_training_) return 1;
  // Consume the pairs this batch used from the buffer (train, non-sage).
  if (!sage_mode_) {
    ins_buf_pair_len_ -= total_instance / 2;
  }
  return 1;
}
// Build the next sampling frontier: compact each key's actual neighbors
// (actual_sample_size[idx] of cur_degree slots) into sample_keys at the
// prefix-sum offset, and record in tmp_sampleidx2row which walk row each new
// sample continues.
__global__ void GraphFillSampleKeysKernel(uint64_t *neighbors,
                                          uint64_t *sample_keys,
                                          int *prefix_sum,
                                          int *sampleidx2row,
                                          int *tmp_sampleidx2row,
                                          int *actual_sample_size,
                                          int cur_degree,
                                          int len) {
  CUDA_KERNEL_LOOP(idx, len) {
    for (int k = 0; k < actual_sample_size[idx]; k++) {
      size_t offset = prefix_sum[idx] + k;
      sample_keys[offset] = neighbors[idx * cur_degree + k];
      tmp_sampleidx2row[offset] = sampleidx2row[idx] + k;
    }
  }
}
// Write the sampled neighbors of step `step` into column `step` of their
// walk rows. sampleidx2row (indexed via the prefix sum) maps each flattened
// (key, sample) slot back to its walk row; walk_ntype, when non-NULL, tags
// the written column with the destination node type.
__global__ void GraphDoWalkKernel(uint64_t *neighbors,
                                  uint64_t *walk,
                                  uint8_t *walk_ntype,
                                  int *d_prefix_sum,
                                  int *actual_sample_size,
                                  int cur_degree,
                                  int step,
                                  int len,
                                  int *id_cnt,
                                  int *sampleidx2row,
                                  int col_size,
                                  uint8_t edge_dst_id) {
  CUDA_KERNEL_LOOP(i, len) {
    for (int k = 0; k < actual_sample_size[i]; k++) {
      // int idx = sampleidx2row[i];
      size_t row = sampleidx2row[k + d_prefix_sum[i]];
      // size_t row = idx * cur_degree + k;
      size_t col = step;
      size_t offset = (row * col_size + col);
      walk[offset] = neighbors[i * cur_degree + k];
      if (walk_ntype != NULL) {
        walk_ntype[offset] = edge_dst_id;
      }
    }
  }
}
// Fill keys to the first column of walk
// First-step initialization: for every start key, write the key into column
// 0 and each sampled neighbor into column 1 of a fresh walk row (one row per
// neighbor, rows laid out via the prefix sum). Also seeds sample_keys and
// the identity sampleidx2row mapping for the next step, and tags the two
// columns with the edge's src/dst node types when walk_ntype is non-NULL.
__global__ void GraphFillFirstStepKernel(int *prefix_sum,
                                         int *sampleidx2row,
                                         uint64_t *walk,
                                         uint8_t *walk_ntype,
                                         uint64_t *keys,
                                         uint8_t edge_src_id,
                                         uint8_t edge_dst_id,
                                         int len,
                                         int walk_degree,
                                         int col_size,
                                         int *actual_sample_size,
                                         uint64_t *neighbors,
                                         uint64_t *sample_keys) {
  CUDA_KERNEL_LOOP(idx, len) {
    for (int k = 0; k < actual_sample_size[idx]; k++) {
      size_t row = prefix_sum[idx] + k;
      sample_keys[row] = neighbors[idx * walk_degree + k];
      sampleidx2row[row] = row;
      size_t offset = col_size * row;
      walk[offset] = keys[idx];
      walk[offset + 1] = neighbors[idx * walk_degree + k];
      if (walk_ntype != NULL) {
        walk_ntype[offset] = edge_src_id;
        walk_ntype[offset + 1] = edge_dst_id;
      }
    }
  }
}
// Per instance (one thread each): count how many of its features fall into
// each slot (each_ins_slot_num) and build, per instance, the exclusive
// prefix sum over slots (each_ins_slot_num_inner_prefix). Launched with
// block(1, 256), so the instance index comes from the y dimension. Assumes
// each_ins_slot_num was zero-initialized by the caller.
__global__ void get_each_ins_info(uint8_t *slot_list,
                                  uint32_t *slot_size_list,
                                  uint32_t *slot_size_prefix,
                                  uint32_t *each_ins_slot_num,
                                  uint32_t *each_ins_slot_num_inner_prefix,
                                  size_t key_num,
                                  int slot_num) {
  const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
  if (i < key_num) {
    // slot_size_prefix[i] is this instance's base offset in slot_list.
    uint32_t slot_index = slot_size_prefix[i];
    size_t each_ins_slot_index = i * slot_num;
    for (int j = 0; j < slot_size_list[i]; j++) {
      each_ins_slot_num[each_ins_slot_index + slot_list[slot_index + j]] += 1;
    }
    // Exclusive prefix sum across this instance's slot counts.
    each_ins_slot_num_inner_prefix[each_ins_slot_index] = 0;
    for (int j = 1; j < slot_num; j++) {
      each_ins_slot_num_inner_prefix[each_ins_slot_index + j] =
          each_ins_slot_num[each_ins_slot_index + j - 1] +
          each_ins_slot_num_inner_prefix[each_ins_slot_index + j - 1];
    }
  }
}
// Transpose the per-instance slot counts: row-major [instance][slot] uint32
// counts are scattered into one uint64 column vector per slot. Launched with
// block(1, 256): the instance index comes from the y dimension.
__global__ void fill_slot_num(uint32_t *d_each_ins_slot_num_ptr,
                              uint64_t **d_ins_slot_num_vector_ptr,
                              size_t key_num,
                              int slot_num) {
  const size_t ins = blockIdx.x * blockDim.y + threadIdx.y;
  if (ins >= key_num) {
    return;
  }
  const size_t row = ins * slot_num;
  for (int slot = 0; slot < slot_num; ++slot) {
    d_ins_slot_num_vector_ptr[slot][ins] = d_each_ins_slot_num_ptr[row + slot];
  }
}
// Copy one slot's feature values for every node into the flat slot tensor.
// Source offset = the node's global feature prefix sum plus its inner
// per-slot offset; destination offset = the slot's lod entry. Launched with
// block(1, 256): the node index comes from the y dimension.
__global__ void fill_slot_tensor(uint64_t *feature_list,
                                 uint32_t *feature_size_prefixsum,
                                 uint32_t *each_ins_slot_num_inner_prefix,
                                 uint64_t *ins_slot_num,
                                 int64_t *slot_lod_tensor,
                                 int64_t *slot_tensor,
                                 int slot,
                                 int slot_num,
                                 size_t node_num) {
  const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
  if (i < node_num) {
    size_t dst_index = slot_lod_tensor[i];
    size_t src_index = feature_size_prefixsum[i] +
                       each_ins_slot_num_inner_prefix[slot_num * i + slot];
    for (uint64_t j = 0; j < ins_slot_num[i]; j++) {
      slot_tensor[dst_index + j] = feature_list[src_index + j];
    }
  }
}
// Count distinct values in the sorted array d_in by comparing adjacent
// elements (the last element always counts). Partial counts are accumulated
// per block in shared memory, then folded into *unique_num with a single
// global atomicAdd per block.
__global__ void GetUniqueFeaNum(uint64_t *d_in,
                                uint64_t *unique_num,
                                size_t len) {
  const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ uint64_t local_num;
  if (threadIdx.x == 0) {
    local_num = 0;
  }
  __syncthreads();
  // An element is the last of its run iff it differs from its successor.
  if (i < len - 1) {
    if (d_in[i] != d_in[i + 1]) {
      atomicAdd(&local_num, 1);
    }
  }
  if (i == len - 1) {
    atomicAdd(&local_num, 1);
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    atomicAdd(unique_num, local_num);
  }
}
// Compact the unique values of the sorted array d_in into d_out: each run's
// last element is kept. Candidates are gathered per block in shared memory,
// then the block reserves a range in d_out with one atomicAdd on unique_num.
// Because slots are claimed with atomics, the output order is not guaranteed
// to be sorted.
__global__ void UniqueFeature(uint64_t *d_in,
                              uint64_t *d_out,
                              uint64_t *unique_num,
                              size_t len) {
  const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ uint64_t local_key[CUDA_NUM_THREADS];
  __shared__ uint64_t local_num;
  __shared__ uint64_t global_num;
  if (threadIdx.x == 0) {
    local_num = 0;
  }
  __syncthreads();
  if (i < len - 1) {
    if (d_in[i] != d_in[i + 1]) {
      size_t dst = atomicAdd(&local_num, 1);
      local_key[dst] = d_in[i];
    }
  }
  if (i == len - 1) {
    size_t dst = atomicAdd(&local_num, 1);
    local_key[dst] = d_in[i];
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    // Reserve this block's output range.
    global_num = atomicAdd(unique_num, local_num);
  }
  __syncthreads();
  // local_num <= blockDim.x: thread t flushes candidate t.
  if (threadIdx.x < local_num) {
    d_out[global_num + threadIdx.x] = local_key[threadIdx.x];
  }
}
// Fill sample_res to the stepth column of walk
// Computes the inclusive prefix sum of actual sample sizes into
// d_prefix_sum (shifted by one so entry 0 stays 0), then either initializes
// the first two walk columns (step == 1) or scatters the sampled neighbors
// into column `step` and refreshes the sampleidx2row mapping. The mapping is
// double-buffered via cur_sampleidx2row_, which is flipped at the end.
void GraphDataGenerator::FillOneStep(uint64_t *d_start_ids,
                                     int etype_id,
                                     uint64_t *walk,
                                     uint8_t *walk_ntype,
                                     int len,
                                     NeighborSampleResult &sample_res,
                                     int cur_degree,
                                     int step,
                                     int *len_per_row) {
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  // edge_to_node_map_ packs (src_type << 32 | dst_type) for the edge type.
  uint64_t node_id = gpu_graph_ptr->edge_to_node_map_[etype_id];
  uint8_t edge_src_id = node_id >> 32;
  uint8_t edge_dst_id = node_id;
  size_t temp_storage_bytes = 0;
  int *d_actual_sample_size = sample_res.actual_sample_size;
  uint64_t *d_neighbors = sample_res.val;
  int *d_prefix_sum = reinterpret_cast<int *>(d_prefix_sum_->ptr());
  uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
  int *d_sampleidx2row =
      reinterpret_cast<int *>(d_sampleidx2rows_[cur_sampleidx2row_]->ptr());
  int *d_tmp_sampleidx2row =
      reinterpret_cast<int *>(d_sampleidx2rows_[1 - cur_sampleidx2row_]->ptr());
  // CUB size-query pass (NULL workspace), then the real scan.
  CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(NULL,
                                              temp_storage_bytes,
                                              d_actual_sample_size,
                                              d_prefix_sum + 1,
                                              len,
                                              sample_stream_));
  auto d_temp_storage = memory::Alloc(
      place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                              temp_storage_bytes,
                                              d_actual_sample_size,
                                              d_prefix_sum + 1,
                                              len,
                                              sample_stream_));
  hipStreamSynchronize(sample_stream_);
  if (step == 1) {
    hipLaunchKernelGGL(( GraphFillFirstStepKernel), dim3(GET_BLOCKS(len)),
                       dim3(CUDA_NUM_THREADS),
                       0,
                       sample_stream_, d_prefix_sum,
        d_tmp_sampleidx2row,
        walk,
        walk_ntype,
        d_start_ids,
        edge_src_id,
        edge_dst_id,
        len,
        walk_degree_,
        walk_len_,
        d_actual_sample_size,
        d_neighbors,
        d_sample_keys);
  } else {
    // Later steps: first rebuild the frontier / row mapping, then scatter
    // the neighbors into column `step` of the walks.
    hipLaunchKernelGGL(( GraphFillSampleKeysKernel), dim3(GET_BLOCKS(len)),
                       dim3(CUDA_NUM_THREADS),
                       0,
                       sample_stream_, d_neighbors,
        d_sample_keys,
        d_prefix_sum,
        d_sampleidx2row,
        d_tmp_sampleidx2row,
        d_actual_sample_size,
        cur_degree,
        len);
    hipLaunchKernelGGL(( GraphDoWalkKernel), dim3(GET_BLOCKS(len)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
        d_neighbors,
        walk,
        walk_ntype,
        d_prefix_sum,
        d_actual_sample_size,
        cur_degree,
        step,
        len,
        len_per_row,
        d_tmp_sampleidx2row,
        walk_len_,
        edge_dst_id);
  }
  if (debug_mode_) {
    size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
    int *h_prefix_sum = new int[len + 1];
    // NOTE(review): h_actual_size is allocated and freed but never
    // populated or printed.
    int *h_actual_size = new int[len];
    int *h_offset2idx = new int[once_max_sample_keynum];
    hipMemcpy(h_offset2idx,
              d_tmp_sampleidx2row,
              once_max_sample_keynum * sizeof(int),
              hipMemcpyDeviceToHost);
    hipMemcpy(h_prefix_sum,
              d_prefix_sum,
              (len + 1) * sizeof(int),
              hipMemcpyDeviceToHost);
    for (int xx = 0; xx < once_max_sample_keynum; xx++) {
      VLOG(2) << "h_offset2idx[" << xx << "]: " << h_offset2idx[xx];
    }
    for (int xx = 0; xx < len + 1; xx++) {
      VLOG(2) << "h_prefix_sum[" << xx << "]: " << h_prefix_sum[xx];
    }
    delete[] h_prefix_sum;
    delete[] h_actual_size;
    delete[] h_offset2idx;
  }
  hipStreamSynchronize(sample_stream_);
  // Flip the double buffer so the next step reads the mapping written here.
  cur_sampleidx2row_ = 1 - cur_sampleidx2row_;
}
// Gathers slot features for `key_num` node ids in `d_walk` and scatters them
// into the slot feed tensors (feed_vec_[3 + 2*i] = values,
// feed_vec_[3 + 2*i + 1] = lod) for every slot. All device work is issued on
// train_stream_. Returns 0 on success.
int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk, size_t key_num) {
  platform::CUDADeviceGuard guard(gpuid_);
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  std::shared_ptr<phi::Allocation> d_feature_list;
  std::shared_ptr<phi::Allocation> d_slot_list;
  if (sage_mode_) {
    // Per-key feature-count and prefix-sum buffers; grown lazily since sage
    // batch sizes vary.
    size_t temp_storage_bytes = (key_num + 1) * sizeof(uint32_t);
    if (d_feature_size_list_buf_ == NULL ||
        d_feature_size_list_buf_->size() < temp_storage_bytes) {
      d_feature_size_list_buf_ =
          memory::AllocShared(this->place_, temp_storage_bytes);
    }
    if (d_feature_size_prefixsum_buf_ == NULL ||
        d_feature_size_prefixsum_buf_->size() < temp_storage_bytes) {
      d_feature_size_prefixsum_buf_ =
          memory::AllocShared(this->place_, temp_storage_bytes);
    }
  }
  // NOTE(review): when !sage_mode_ these buffers are assumed to be allocated
  // elsewhere (they are only sized above under sage_mode_) — confirm against
  // the allocator/reset path; a NULL buffer here would crash.
  uint32_t *d_feature_size_list_ptr =
      reinterpret_cast<uint32_t *>(d_feature_size_list_buf_->ptr());
  uint32_t *d_feature_size_prefixsum_ptr =
      reinterpret_cast<uint32_t *>(d_feature_size_prefixsum_buf_->ptr());
  // Fills per-key feature counts + prefix sums and returns the flattened
  // feature/slot-id lists in d_feature_list / d_slot_list.
  int fea_num =
      gpu_graph_ptr->get_feature_info_of_nodes(gpuid_,
                                               d_walk,
                                               key_num,
                                               d_feature_size_list_ptr,
                                               d_feature_size_prefixsum_ptr,
                                               d_feature_list,
                                               d_slot_list);
  int64_t *slot_tensor_ptr_[slot_num_];
  int64_t *slot_lod_tensor_ptr_[slot_num_];
  if (fea_num == 0) {
    // No features at all: emit a 1x1 zero value tensor per slot and a lod of
    // [0, ..., 0, 1] so downstream ops see well-formed (empty) slots.
    int64_t default_lod = 1;
    for (int i = 0; i < slot_num_; ++i) {
      slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
          {(long)key_num + 1}, this->place_);  // NOLINT
      slot_tensor_ptr_[i] =
          feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
      CUDA_CHECK(hipMemsetAsync(
          slot_tensor_ptr_[i], 0, sizeof(int64_t), train_stream_));
      CUDA_CHECK(hipMemsetAsync(slot_lod_tensor_ptr_[i],
                                0,
                                sizeof(int64_t) * key_num,
                                train_stream_));
      CUDA_CHECK(hipMemcpyAsync(
          reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
          &default_lod,
          sizeof(int64_t),
          hipMemcpyHostToDevice,
          train_stream_));
    }
    CUDA_CHECK(hipStreamSynchronize(train_stream_));
    return 0;
  }
  uint64_t *d_feature_list_ptr =
      reinterpret_cast<uint64_t *>(d_feature_list->ptr());
  uint8_t *d_slot_list_ptr = reinterpret_cast<uint8_t *>(d_slot_list->ptr());
  // Per (instance, slot) feature counts plus the within-instance prefix of
  // those counts, both laid out as slot_num_ x key_num.
  std::shared_ptr<phi::Allocation> d_each_ins_slot_num_inner_prefix =
      memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
  std::shared_ptr<phi::Allocation> d_each_ins_slot_num =
      memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
  uint32_t *d_each_ins_slot_num_ptr =
      reinterpret_cast<uint32_t *>(d_each_ins_slot_num->ptr());
  uint32_t *d_each_ins_slot_num_inner_prefix_ptr =
      reinterpret_cast<uint32_t *>(d_each_ins_slot_num_inner_prefix->ptr());
  CUDA_CHECK(hipMemsetAsync(d_each_ins_slot_num_ptr,
                            0,
                            slot_num_ * key_num * sizeof(uint32_t),
                            train_stream_));
  // One 256-thread block slice per 256 instances (block is 1x256).
  dim3 grid((key_num - 1) / 256 + 1);
  dim3 block(1, 256);
  hipLaunchKernelGGL(( get_each_ins_info), dim3(grid), dim3(block), 0, train_stream_,
      d_slot_list_ptr,
      d_feature_size_list_ptr,
      d_feature_size_prefixsum_ptr,
      d_each_ins_slot_num_ptr,
      d_each_ins_slot_num_inner_prefix_ptr,
      key_num,
      slot_num_);
  // Per-slot arrays of length key_num with each instance's feature count for
  // that slot, plus a device-side vector of those array pointers for kernels.
  std::vector<std::shared_ptr<phi::Allocation>> ins_slot_num(slot_num_,
                                                             nullptr);
  std::vector<uint64_t *> ins_slot_num_vecotr(slot_num_, NULL);
  std::shared_ptr<phi::Allocation> d_ins_slot_num_vector =
      memory::AllocShared(place_, (slot_num_) * sizeof(uint64_t *));
  uint64_t **d_ins_slot_num_vector_ptr =
      reinterpret_cast<uint64_t **>(d_ins_slot_num_vector->ptr());
  for (int i = 0; i < slot_num_; i++) {
    ins_slot_num[i] = memory::AllocShared(place_, key_num * sizeof(uint64_t));
    ins_slot_num_vecotr[i] =
        reinterpret_cast<uint64_t *>(ins_slot_num[i]->ptr());
  }
  CUDA_CHECK(
      hipMemcpyAsync(reinterpret_cast<char *>(d_ins_slot_num_vector_ptr),
                     ins_slot_num_vecotr.data(),
                     sizeof(uint64_t *) * slot_num_,
                     hipMemcpyHostToDevice,
                     train_stream_));
  hipLaunchKernelGGL(( fill_slot_num), dim3(grid), dim3(block), 0, train_stream_,
      d_each_ins_slot_num_ptr, d_ins_slot_num_vector_ptr, key_num, slot_num_);
  CUDA_CHECK(hipStreamSynchronize(train_stream_));
  for (int i = 0; i < slot_num_; ++i) {
    slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
        {(long)key_num + 1}, this->place_);  // NOLINT
  }
  // Size the scan's temp storage once (counts are the same for every slot).
  size_t temp_storage_bytes = 0;
  CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(NULL,
                                              temp_storage_bytes,
                                              ins_slot_num_vecotr[0],
                                              slot_lod_tensor_ptr_[0] + 1,
                                              key_num,
                                              train_stream_));
  CUDA_CHECK(hipStreamSynchronize(train_stream_));
  auto d_temp_storage = memory::Alloc(
      this->place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(train_stream_)));
  std::vector<int64_t> each_slot_fea_num(slot_num_, 0);
  // Build each slot's lod via inclusive scan (lod[0] forced to 0) and read
  // back the total feature count per slot from lod[key_num].
  for (int i = 0; i < slot_num_; ++i) {
    CUDA_CHECK(hipMemsetAsync(
        slot_lod_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
    CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                                temp_storage_bytes,
                                                ins_slot_num_vecotr[i],
                                                slot_lod_tensor_ptr_[i] + 1,
                                                key_num,
                                                train_stream_));
    CUDA_CHECK(hipMemcpyAsync(&each_slot_fea_num[i],
                              slot_lod_tensor_ptr_[i] + key_num,
                              sizeof(uint64_t),
                              hipMemcpyDeviceToHost,
                              train_stream_));
  }
  CUDA_CHECK(hipStreamSynchronize(train_stream_));
  for (int i = 0; i < slot_num_; ++i) {
    slot_tensor_ptr_[i] = feed_vec_[3 + 2 * i]->mutable_data<int64_t>(
        {each_slot_fea_num[i], 1}, this->place_);
  }
  int64_t default_lod = 1;
  for (int i = 0; i < slot_num_; ++i) {
    // Scatter this slot's feature values into its tensor using the lod and
    // the within-instance prefix offsets.
    hipLaunchKernelGGL(( fill_slot_tensor), dim3(grid), dim3(block), 0, train_stream_,
        d_feature_list_ptr,
        d_feature_size_prefixsum_ptr,
        d_each_ins_slot_num_inner_prefix_ptr,
        ins_slot_num_vecotr[i],
        slot_lod_tensor_ptr_[i],
        slot_tensor_ptr_[i],
        i,
        slot_num_,
        key_num);
    // trick for empty tensor
    if (each_slot_fea_num[i] == 0) {
      slot_tensor_ptr_[i] =
          feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
      CUDA_CHECK(hipMemsetAsync(
          slot_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
      CUDA_CHECK(hipMemcpyAsync(
          reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
          &default_lod,
          sizeof(int64_t),
          hipMemcpyHostToDevice,
          train_stream_));
    }
  }
  CUDA_CHECK(hipStreamSynchronize(train_stream_));
  if (debug_mode_) {
    // Dump the raw feature layout and every slot's lod/value tensor to VLOG.
    std::vector<uint32_t> h_feature_size_list(key_num, 0);
    std::vector<uint32_t> h_feature_size_list_prefixsum(key_num, 0);
    std::vector<uint64_t> node_list(key_num, 0);
    std::vector<uint64_t> h_feature_list(fea_num, 0);
    std::vector<uint8_t> h_slot_list(fea_num, 0);
    CUDA_CHECK(
        hipMemcpyAsync(reinterpret_cast<char *>(h_feature_size_list.data()),
                       d_feature_size_list_ptr,
                       sizeof(uint32_t) * key_num,
                       hipMemcpyDeviceToHost,
                       train_stream_));
    CUDA_CHECK(hipMemcpyAsync(
        reinterpret_cast<char *>(h_feature_size_list_prefixsum.data()),
        d_feature_size_prefixsum_ptr,
        sizeof(uint32_t) * key_num,
        hipMemcpyDeviceToHost,
        train_stream_));
    CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(node_list.data()),
                              d_walk,
                              sizeof(uint64_t) * key_num,
                              hipMemcpyDeviceToHost,
                              train_stream_));
    CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(h_feature_list.data()),
                              d_feature_list_ptr,
                              sizeof(uint64_t) * fea_num,
                              hipMemcpyDeviceToHost,
                              train_stream_));
    CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(h_slot_list.data()),
                              d_slot_list_ptr,
                              sizeof(uint8_t) * fea_num,
                              hipMemcpyDeviceToHost,
                              train_stream_));
    CUDA_CHECK(hipStreamSynchronize(train_stream_));
    for (size_t i = 0; i < key_num; i++) {
      std::stringstream ss;
      ss << "node_id: " << node_list[i]
         << " fea_num: " << h_feature_size_list[i] << " offset "
         << h_feature_size_list_prefixsum[i] << " slot: ";
      for (uint32_t j = 0; j < h_feature_size_list[i]; j++) {
        ss << int(h_slot_list[h_feature_size_list_prefixsum[i] + j]) << " : "
           << h_feature_list[h_feature_size_list_prefixsum[i] + j] << " ";
      }
      VLOG(0) << ss.str();
    }
    VLOG(0) << "all fea_num is " << fea_num << " calc fea_num is "
            << h_feature_size_list[key_num - 1] +
                   h_feature_size_list_prefixsum[key_num - 1];
    for (int i = 0; i < slot_num_; ++i) {
      std::vector<int64_t> h_slot_lod_tensor(key_num + 1, 0);
      CUDA_CHECK(
          hipMemcpyAsync(reinterpret_cast<char *>(h_slot_lod_tensor.data()),
                         slot_lod_tensor_ptr_[i],
                         sizeof(int64_t) * (key_num + 1),
                         hipMemcpyDeviceToHost,
                         train_stream_));
      CUDA_CHECK(hipStreamSynchronize(train_stream_));
      std::stringstream ss_lod;
      std::stringstream ss_tensor;
      ss_lod << " slot " << i << " lod is [";
      for (size_t j = 0; j < key_num + 1; j++) {
        ss_lod << h_slot_lod_tensor[j] << ",";
      }
      ss_lod << "]";
      std::vector<int64_t> h_slot_tensor(h_slot_lod_tensor[key_num], 0);
      CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(h_slot_tensor.data()),
                                slot_tensor_ptr_[i],
                                sizeof(int64_t) * h_slot_lod_tensor[key_num],
                                hipMemcpyDeviceToHost,
                                train_stream_));
      CUDA_CHECK(hipStreamSynchronize(train_stream_));
      ss_tensor << " tensor is [ ";
      for (size_t j = 0; j < h_slot_lod_tensor[key_num]; j++) {
        ss_tensor << h_slot_tensor[j] << ",";
      }
      ss_tensor << "]";
      VLOG(0) << ss_lod.str() << " " << ss_tensor.str();
    }
  }
  return 0;
}
// Fetches the features of `key_num` nodes from `d_walk` into `d_feature` via
// the graph wrapper; forwards the wrapper's return code to the caller.
int GraphDataGenerator::FillFeatureBuf(uint64_t *d_walk,
                                       uint64_t *d_feature,
                                       size_t key_num) {
  platform::CUDADeviceGuard device_guard(gpuid_);
  auto graph_wrapper = GraphGpuWrapper::GetInstance();
  int *slot_feature_num_map =
      reinterpret_cast<int *>(d_slot_feature_num_map_->ptr());
  return graph_wrapper->get_feature_of_nodes(gpuid_,
                                             d_walk,
                                             d_feature,
                                             key_num,
                                             slot_num_,
                                             slot_feature_num_map,
                                             fea_num_per_node_);
}
// Overload taking owned allocations: fetches features for the whole walk
// buffer (buf_size_ keys) into `d_feature`; returns the wrapper's code.
int GraphDataGenerator::FillFeatureBuf(
    std::shared_ptr<phi::Allocation> d_walk,
    std::shared_ptr<phi::Allocation> d_feature) {
  platform::CUDADeviceGuard device_guard(gpuid_);
  auto graph_wrapper = GraphGpuWrapper::GetInstance();
  uint64_t *walk_keys = reinterpret_cast<uint64_t *>(d_walk->ptr());
  uint64_t *feature_out = reinterpret_cast<uint64_t *>(d_feature->ptr());
  int *slot_feature_num_map =
      reinterpret_cast<int *>(d_slot_feature_num_map_->ptr());
  return graph_wrapper->get_feature_of_nodes(gpuid_,
                                             walk_keys,
                                             feature_out,
                                             buf_size_,
                                             slot_num_,
                                             slot_feature_num_map,
                                             fea_num_per_node_);
}
// In deepwalk mode, InsertTable signals a full table by returning 1 so the
// caller can stop inserting; in sage mode the table is flushed
// (CopyUniqueNodes) and cleared instead, so insertion can continue.
// Inserts `len` keys into the de-dup hash table, updating the device-side
// unique-node counter in `d_uniq_node_num`.
// Returns 1 (without inserting) when a non-sage training table would exceed
// train_table_cap_; otherwise flushes+clears the table on overflow (sage
// training / inference) and returns 0 after inserting.
// All device work runs on sample_stream_.
// Fix: wrap the previously unchecked hip* calls in CUDA_CHECK, consistent
// with the rest of this file (and this function's own final synchronize).
int GraphDataGenerator::InsertTable(
    const uint64_t *d_keys,
    uint64_t len,
    std::shared_ptr<phi::Allocation> d_uniq_node_num) {
  // Used under NOT WHOLE_HBM.
  uint64_t h_uniq_node_num = 0;
  uint64_t *d_uniq_node_num_ptr =
      reinterpret_cast<uint64_t *>(d_uniq_node_num->ptr());
  // Read the current unique-node count back to the host.
  CUDA_CHECK(hipMemcpyAsync(&h_uniq_node_num,
                            d_uniq_node_num_ptr,
                            sizeof(uint64_t),
                            hipMemcpyDeviceToHost,
                            sample_stream_));
  CUDA_CHECK(hipStreamSynchronize(sample_stream_));
  if (gpu_graph_training_) {
    VLOG(2) << "table capacity: " << train_table_cap_ << ", " << h_uniq_node_num
            << " used";
    if (h_uniq_node_num + len >= train_table_cap_) {
      if (!sage_mode_) {
        // Deepwalk path: let the caller handle the full table.
        return 1;
      } else {
        // Copy unique nodes first.
        uint64_t copy_len = CopyUniqueNodes();
        copy_unique_len_ += copy_len;
        table_->clear(sample_stream_);
        CUDA_CHECK(hipMemsetAsync(
            d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_));
      }
    }
  } else {
    // used only for sage_mode.
    if (h_uniq_node_num + len >= infer_table_cap_) {
      uint64_t copy_len = CopyUniqueNodes();
      copy_unique_len_ += copy_len;
      table_->clear(sample_stream_);
      CUDA_CHECK(hipMemsetAsync(
          d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_));
    }
  }
  table_->insert(d_keys, len, d_uniq_node_num_ptr, sample_stream_);
  CUDA_CHECK(hipStreamSynchronize(sample_stream_));
  return 0;
}
// Samples up to `sample_size` neighbors per node for all edge types at once.
// Outputs: edges_split_num gets the cumulative neighbor counts per edge type,
// *neighbor_len the total sampled count, and the returned vector holds
// {compacted neighbor ids (src), repeated center ids (dst)}.
std::vector<std::shared_ptr<phi::Allocation>>
GraphDataGenerator::SampleNeighbors(int64_t *uniq_nodes,
                                    int len,
                                    int sample_size,
                                    std::vector<int> &edges_split_num,
                                    int64_t *neighbor_len) {
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  auto sample_res = gpu_graph_ptr->graph_neighbor_sample_all_edge_type(
      gpuid_,
      edge_to_id_len_,
      reinterpret_cast<uint64_t *>(uniq_nodes),
      sample_size,
      len,
      edge_type_graph_);
  // Per-(node, edge-type) actual sample counts, laid out edge-type-major.
  int *all_sample_count_ptr =
      reinterpret_cast<int *>(sample_res.actual_sample_size_mem->ptr());
  // Exclusive-style cumulative sizes: slot 0 is zeroed, scan writes from +1.
  auto cumsum_actual_sample_size = memory::Alloc(
      place_,
      (len * edge_to_id_len_ + 1) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *cumsum_actual_sample_size_ptr =
      reinterpret_cast<int *>(cumsum_actual_sample_size->ptr());
  hipMemsetAsync(cumsum_actual_sample_size_ptr,
                 0,
                 (len * edge_to_id_len_ + 1) * sizeof(int),
                 sample_stream_);
  // Two-phase CUB scan: first call sizes the temp storage.
  size_t temp_storage_bytes = 0;
  CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(NULL,
                                              temp_storage_bytes,
                                              all_sample_count_ptr,
                                              cumsum_actual_sample_size_ptr + 1,
                                              len * edge_to_id_len_,
                                              sample_stream_));
  auto d_temp_storage = memory::Alloc(
      place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                              temp_storage_bytes,
                                              all_sample_count_ptr,
                                              cumsum_actual_sample_size_ptr + 1,
                                              len * edge_to_id_len_,
                                              sample_stream_));
  hipStreamSynchronize(sample_stream_);
  // Cumulative count at the end of each edge-type segment (position
  // (i+1)*len of the scan result).
  edges_split_num.resize(edge_to_id_len_);
  for (int i = 0; i < edge_to_id_len_; i++) {
    hipMemcpyAsync(edges_split_num.data() + i,
                   cumsum_actual_sample_size_ptr + (i + 1) * len,
                   sizeof(int),
                   hipMemcpyDeviceToHost,
                   sample_stream_);
  }
  CUDA_CHECK(hipStreamSynchronize(sample_stream_));
  // Last cumulative entry == grand total of sampled neighbors.
  int all_sample_size = edges_split_num[edge_to_id_len_ - 1];
  auto final_sample_val = memory::AllocShared(
      place_,
      all_sample_size * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  auto final_sample_val_dst = memory::AllocShared(
      place_,
      all_sample_size * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int64_t *final_sample_val_ptr =
      reinterpret_cast<int64_t *>(final_sample_val->ptr());
  int64_t *final_sample_val_dst_ptr =
      reinterpret_cast<int64_t *>(final_sample_val_dst->ptr());
  int64_t *all_sample_val_ptr =
      reinterpret_cast<int64_t *>(sample_res.val_mem->ptr());
  // Compact the padded sample buffer (sample_size slots per node) into a
  // dense array, and emit the matching center-node id for every edge.
  hipLaunchKernelGGL(( FillActualNeighbors), dim3(GET_BLOCKS(len * edge_to_id_len_)),
      dim3(CUDA_NUM_THREADS),
      0,
      sample_stream_, all_sample_val_ptr,
      final_sample_val_ptr,
      final_sample_val_dst_ptr,
      all_sample_count_ptr,
      cumsum_actual_sample_size_ptr,
      sample_size,
      len * edge_to_id_len_,
      len);
  *neighbor_len = all_sample_size;
  hipStreamSynchronize(sample_stream_);
  std::vector<std::shared_ptr<phi::Allocation>> sample_results;
  sample_results.emplace_back(final_sample_val);
  sample_results.emplace_back(final_sample_val_dst);
  return sample_results;
}
// Deduplicates `input` (num_input ids) through the open-addressing hash table
// given by (keys, values, key_index) of size len_hashtable. Writes the count
// of distinct ids to *final_nodes_len and returns the unique-id array; the
// table's `values` end up holding each key's compact (reindexed) position.
std::shared_ptr<phi::Allocation> GraphDataGenerator::FillReindexHashTable(
    int64_t *input,
    int num_input,
    int64_t len_hashtable,
    int64_t *keys,
    int *values,
    int *key_index) {
  // Insert all ids; key_index remembers the first inserting position per key.
  hipLaunchKernelGGL(( phi::BuildHashTable<int64_t>)
      , dim3(GET_BLOCKS(num_input)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
      input, num_input, len_hashtable, keys, key_index);
  // Get item index count.
  auto item_count = memory::Alloc(
      place_,
      (num_input + 1) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *item_count_ptr = reinterpret_cast<int *>(item_count->ptr());
  hipMemsetAsync(
      item_count_ptr, 0, sizeof(int) * (num_input + 1), sample_stream_);
  // Mark a 1 at each position that "owns" a distinct key.
  hipLaunchKernelGGL(( phi::GetItemIndexCount<int64_t>)
      , dim3(GET_BLOCKS(num_input)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
      input, item_count_ptr, num_input, len_hashtable, keys, key_index);
  // Exclusive scan turns the 0/1 marks into compact output offsets; the last
  // element then holds the number of unique items.
  size_t temp_storage_bytes = 0;
  hipcub::DeviceScan::ExclusiveSum(NULL,
                                   temp_storage_bytes,
                                   item_count_ptr,
                                   item_count_ptr,
                                   num_input + 1,
                                   sample_stream_);
  auto d_temp_storage = memory::Alloc(
      place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  hipcub::DeviceScan::ExclusiveSum(d_temp_storage->ptr(),
                                   temp_storage_bytes,
                                   item_count_ptr,
                                   item_count_ptr,
                                   num_input + 1,
                                   sample_stream_);
  int total_unique_items = 0;
  hipMemcpyAsync(&total_unique_items,
                 item_count_ptr + num_input,
                 sizeof(int),
                 hipMemcpyDeviceToHost,
                 sample_stream_);
  hipStreamSynchronize(sample_stream_);
  auto unique_items = memory::AllocShared(
      place_,
      total_unique_items * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int64_t *unique_items_ptr = reinterpret_cast<int64_t *>(unique_items->ptr());
  *final_nodes_len = total_unique_items;
  // Get unique items
  hipLaunchKernelGGL(( phi::FillUniqueItems<int64_t>)
      , dim3(GET_BLOCKS(num_input)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
      input,
      num_input,
      len_hashtable,
      unique_items_ptr,
      item_count_ptr,
      keys,
      values,
      key_index);
  hipStreamSynchronize(sample_stream_);
  return unique_items;
}
// Reindexes the edge sources in-place: builds a fresh hash table over
// [center_nodes | reindex_src_data], rewrites reindex_src_data to compact
// local ids, stores the unique-node count in *final_nodes_len, and returns
// the unique (final) node list. Center nodes come first so they keep the
// lowest indices.
std::shared_ptr<phi::Allocation> GraphDataGenerator::GetReindexResult(
    int64_t *reindex_src_data,
    int64_t *center_nodes,
    int *final_nodes_len,
    int node_len,
    int64_t neighbor_len) {
  // Reset reindex table
  int64_t *d_reindex_table_key_ptr =
      reinterpret_cast<int64_t *>(d_reindex_table_key_->ptr());
  int *d_reindex_table_value_ptr =
      reinterpret_cast<int *>(d_reindex_table_value_->ptr());
  int *d_reindex_table_index_ptr =
      reinterpret_cast<int *>(d_reindex_table_index_->ptr());
  // Fill table with -1.
  hipMemsetAsync(d_reindex_table_key_ptr,
                 -1,
                 reindex_table_size_ * sizeof(int64_t),
                 sample_stream_);
  hipMemsetAsync(d_reindex_table_value_ptr,
                 -1,
                 reindex_table_size_ * sizeof(int),
                 sample_stream_);
  hipMemsetAsync(d_reindex_table_index_ptr,
                 -1,
                 reindex_table_size_ * sizeof(int),
                 sample_stream_);
  // Concatenate centers followed by sampled neighbors into one id array.
  auto all_nodes = memory::AllocShared(
      place_,
      (node_len + neighbor_len) * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int64_t *all_nodes_data = reinterpret_cast<int64_t *>(all_nodes->ptr());
  hipMemcpyAsync(all_nodes_data,
                 center_nodes,
                 sizeof(int64_t) * node_len,
                 hipMemcpyDeviceToDevice,
                 sample_stream_);
  hipMemcpyAsync(all_nodes_data + node_len,
                 reindex_src_data,
                 sizeof(int64_t) * neighbor_len,
                 hipMemcpyDeviceToDevice,
                 sample_stream_);
  hipStreamSynchronize(sample_stream_);
  auto final_nodes = FillReindexHashTable(all_nodes_data,
                                          node_len + neighbor_len,
                                          reindex_table_size_,
                                          d_reindex_table_key_ptr,
                                          d_reindex_table_value_ptr,
                                          d_reindex_table_index_ptr,
                                          final_nodes_len);
  // Replace each original src id with its compact index from the table.
  hipLaunchKernelGGL(( phi::ReindexSrcOutput<int64_t>)
      , dim3(GET_BLOCKS(neighbor_len)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
      reindex_src_data,
      neighbor_len,
      reindex_table_size_,
      d_reindex_table_key_ptr,
      d_reindex_table_value_ptr);
  return final_nodes;
}
// Builds a multi-hop sample graph for sage training: dedups `node_ids`
// (writing the inverse mapping into `inverse`), then for each hop samples
// neighbors and reindexes them. Edges and split counts are appended to
// graph_edges_vec_ / edges_split_num_vec_; *final_len and the return value
// describe the node set of the last hop.
std::shared_ptr<phi::Allocation> GraphDataGenerator::GenerateSampleGraph(
    uint64_t *node_ids,
    int len,
    int *final_len,
    std::shared_ptr<phi::Allocation> &inverse) {
  VLOG(2) << "Get Unique Nodes";
  auto uniq_nodes = memory::Alloc(
      place_,
      len * sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *inverse_ptr = reinterpret_cast<int *>(inverse->ptr());
  int64_t *uniq_nodes_data = reinterpret_cast<int64_t *>(uniq_nodes->ptr());
  // Dedup input ids; inverse_ptr maps each original position to its unique
  // index so the caller can scatter results back.
  int uniq_len = dedup_keys_and_fillidx(
      len,
      node_ids,
      reinterpret_cast<uint64_t *>(uniq_nodes_data),
      reinterpret_cast<uint64_t *>(d_sorted_keys_->ptr()),
      reinterpret_cast<uint32_t *>(inverse_ptr),
      reinterpret_cast<uint32_t *>(d_sorted_idx_->ptr()),
      reinterpret_cast<uint32_t *>(d_offset_->ptr()),
      reinterpret_cast<uint32_t *>(d_merged_cnts_->ptr()),
      sample_stream_,
      d_buf_,
      place_);
  int len_samples = samples_.size();
  VLOG(2) << "Sample Neighbors and Reindex";
  std::vector<int> edges_split_num;
  std::vector<std::shared_ptr<phi::Allocation>> final_nodes_vec;
  std::vector<std::shared_ptr<phi::Allocation>> graph_edges;
  std::vector<std::vector<int>> edges_split_num_for_graph;
  std::vector<int> final_nodes_len_vec;
  for (int i = 0; i < len_samples; i++) {
    edges_split_num.clear();
    std::shared_ptr<phi::Allocation> neighbors, reindex_dst;
    int64_t neighbors_len = 0;
    // Hop 0 expands the deduped inputs; later hops expand the previous
    // hop's unique node set.
    if (i == 0) {
      auto sample_results = SampleNeighbors(uniq_nodes_data,
                                            uniq_len,
                                            samples_[i],
                                            edges_split_num,
                                            &neighbors_len);
      neighbors = sample_results[0];
      reindex_dst = sample_results[1];
      edges_split_num.push_back(uniq_len);
    } else {
      int64_t *final_nodes_data =
          reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
      auto sample_results = SampleNeighbors(final_nodes_data,
                                            final_nodes_len_vec[i - 1],
                                            samples_[i],
                                            edges_split_num,
                                            &neighbors_len);
      neighbors = sample_results[0];
      reindex_dst = sample_results[1];
      edges_split_num.push_back(final_nodes_len_vec[i - 1]);
    }
    // Reindex src ids against this hop's center nodes, producing the hop's
    // unique node set.
    int64_t *reindex_src_data = reinterpret_cast<int64_t *>(neighbors->ptr());
    int final_nodes_len = 0;
    if (i == 0) {
      auto tmp_final_nodes = GetReindexResult(reindex_src_data,
                                              uniq_nodes_data,
                                              &final_nodes_len,
                                              uniq_len,
                                              neighbors_len);
      final_nodes_vec.emplace_back(tmp_final_nodes);
      final_nodes_len_vec.emplace_back(final_nodes_len);
    } else {
      int64_t *final_nodes_data =
          reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
      auto tmp_final_nodes = GetReindexResult(reindex_src_data,
                                              final_nodes_data,
                                              &final_nodes_len,
                                              final_nodes_len_vec[i - 1],
                                              neighbors_len);
      final_nodes_vec.emplace_back(tmp_final_nodes);
      final_nodes_len_vec.emplace_back(final_nodes_len);
    }
    edges_split_num.emplace_back(
        final_nodes_len_vec[i]);  // [edges_split_num, next_num_nodes,
                                  // num_nodes]
    edges_split_num.emplace_back(neighbors_len);
    graph_edges.emplace_back(neighbors);
    graph_edges.emplace_back(reindex_dst);
    edges_split_num_for_graph.emplace_back(edges_split_num);
  }
  graph_edges_vec_.emplace_back(graph_edges);
  edges_split_num_vec_.emplace_back(edges_split_num_for_graph);
  *final_len = final_nodes_len_vec[len_samples - 1];
  return final_nodes_vec[len_samples - 1];
}
// Queries the degree of each of the `len` nodes for every edge type.
// Returns a device buffer sized len * edge_to_id_len_ ints, filled by one
// get_node_degree call per edge type on sample_stream_.
std::shared_ptr<phi::Allocation> GraphDataGenerator::GetNodeDegree(
    uint64_t *node_ids, int len) {
  auto wrapper = GraphGpuWrapper::GetInstance();
  auto degree_buf = memory::AllocShared(
      place_,
      len * edge_to_id_len_ * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  auto edge_map = wrapper->edge_to_id;
  for (auto &edge_entry : edge_map) {
    int edge_idx = edge_entry.second;
    wrapper->get_node_degree(gpuid_, edge_idx, node_ids, len, degree_buf);
  }
  return degree_buf;
}
// Drains the unique-node hash table into host_vec_ (appending after the
// copy_unique_len_ nodes already flushed) and returns how many nodes were
// copied. No-op (returns 0) under WHOLE_HBM storage, where dedup is not used.
uint64_t GraphDataGenerator::CopyUniqueNodes() {
  if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
    // Read the current unique count from the device counter.
    uint64_t h_uniq_node_num = 0;
    uint64_t *d_uniq_node_num =
        reinterpret_cast<uint64_t *>(d_uniq_node_num_->ptr());
    hipMemcpyAsync(&h_uniq_node_num,
                   d_uniq_node_num,
                   sizeof(uint64_t),
                   hipMemcpyDeviceToHost,
                   sample_stream_);
    hipStreamSynchronize(sample_stream_);
    auto d_uniq_node = memory::AllocShared(
        place_,
        h_uniq_node_num * sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    uint64_t *d_uniq_node_ptr =
        reinterpret_cast<uint64_t *>(d_uniq_node->ptr());
    // Cursor used by get_keys to track its write position; starts at 0.
    auto d_node_cursor = memory::AllocShared(
        place_,
        sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    uint64_t *d_node_cursor_ptr =
        reinterpret_cast<uint64_t *>(d_node_cursor->ptr());
    hipMemsetAsync(d_node_cursor_ptr, 0, sizeof(uint64_t), sample_stream_);
    // uint64_t unused_key = std::numeric_limits<uint64_t>::max();
    table_->get_keys(d_uniq_node_ptr, d_node_cursor_ptr, sample_stream_);
    hipStreamSynchronize(sample_stream_);
    // Append the drained keys after nodes flushed by earlier calls.
    host_vec_.resize(h_uniq_node_num + copy_unique_len_);
    hipMemcpyAsync(host_vec_.data() + copy_unique_len_,
                   d_uniq_node_ptr,
                   sizeof(uint64_t) * h_uniq_node_num,
                   hipMemcpyDeviceToHost,
                   sample_stream_);
    hipStreamSynchronize(sample_stream_);
    return h_uniq_node_num;
  }
  return 0;
}
// Top-level driver for one pass: fills the walk buffer (training) or the
// inference key buffer, and — in sage mode — pre-builds every batch's sample
// graph (final nodes, inverse mapping, optional degrees), caching them in the
// *_vec_ members for later batch consumption.
void GraphDataGenerator::DoWalkandSage() {
  int device_id = place_.GetDeviceId();
  debug_gpu_memory_info(device_id, "DoWalkandSage start");
  platform::CUDADeviceGuard guard(gpuid_);
  if (gpu_graph_training_) {
    // train
    bool train_flag;
    if (FLAGS_graph_metapath_split_opt) {
      train_flag = FillWalkBufMultiPath();
    } else {
      train_flag = FillWalkBuf();
    }
    if (sage_mode_) {
      sage_batch_num_ = 0;
      if (train_flag) {
        int total_instance = 0, uniq_instance = 0;
        bool ins_pair_flag = true;
        uint64_t *ins_buf, *ins_cursor;
        while (ins_pair_flag) {
          // Refill instance pairs until a full batch is available or the
          // walk buffer is exhausted (res == -1 with nothing pending).
          int res = 0;
          while (ins_buf_pair_len_ < batch_size_) {
            res = FillInsBuf(sample_stream_);
            if (res == -1) {
              if (ins_buf_pair_len_ == 0) {
                ins_pair_flag = false;
              }
              break;
            }
          }
          if (!ins_pair_flag) {
            break;
          }
          // A pair contributes two instances; take a (possibly short) batch
          // from the tail of the instance buffer.
          total_instance =
              ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
          total_instance *= 2;
          ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
          ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
          auto inverse = memory::AllocShared(
              place_,
              total_instance * sizeof(int),
              phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
          auto final_sage_nodes = GenerateSampleGraph(
              ins_cursor, total_instance, &uniq_instance, inverse);
          uint64_t *final_sage_nodes_ptr =
              reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
          if (get_degree_) {
            auto node_degrees =
                GetNodeDegree(final_sage_nodes_ptr, uniq_instance);
            node_degree_vec_.emplace_back(node_degrees);
          }
          hipStreamSynchronize(sample_stream_);
          if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
            // NOTE: shadows the outer final_sage_nodes_ptr with an identical
            // value; InsertTable's return code is ignored here.
            uint64_t *final_sage_nodes_ptr =
                reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
            InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
          }
          // Cache this batch's graph data for later consumption.
          final_sage_nodes_vec_.emplace_back(final_sage_nodes);
          inverse_vec_.emplace_back(inverse);
          uniq_instance_vec_.emplace_back(uniq_instance);
          total_instance_vec_.emplace_back(total_instance);
          ins_buf_pair_len_ -= total_instance / 2;
          sage_batch_num_ += 1;
        }
        uint64_t h_uniq_node_num = CopyUniqueNodes();
        VLOG(1) << "train sage_batch_num: " << sage_batch_num_;
      }
    }
  } else {
    // infer
    bool infer_flag = FillInferBuf();
    if (sage_mode_) {
      sage_batch_num_ = 0;
      if (infer_flag) {
        // Walk the inference key range in batch_size_ steps; each key is
        // duplicated (src == dst pair), hence the *2.
        int total_instance = 0, uniq_instance = 0;
        total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
                             ? batch_size_
                             : infer_node_end_ - infer_node_start_;
        total_instance *= 2;
        while (total_instance != 0) {
          uint64_t *d_type_keys =
              reinterpret_cast<uint64_t *>(d_device_keys_[cursor_]->ptr());
          d_type_keys += infer_node_start_;
          infer_node_start_ += total_instance / 2;
          auto node_buf = memory::AllocShared(
              place_,
              total_instance * sizeof(uint64_t),
              phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
          int64_t *node_buf_ptr = reinterpret_cast<int64_t *>(node_buf->ptr());
          hipLaunchKernelGGL(( CopyDuplicateKeys), dim3(GET_BLOCKS(total_instance / 2)),
              dim3(CUDA_NUM_THREADS),
              0,
              sample_stream_,
              node_buf_ptr, d_type_keys, total_instance / 2);
          uint64_t *node_buf_ptr_ =
              reinterpret_cast<uint64_t *>(node_buf->ptr());
          auto inverse = memory::AllocShared(
              place_,
              total_instance * sizeof(int),
              phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
          auto final_sage_nodes = GenerateSampleGraph(
              node_buf_ptr_, total_instance, &uniq_instance, inverse);
          uint64_t *final_sage_nodes_ptr =
              reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
          if (get_degree_) {
            auto node_degrees =
                GetNodeDegree(final_sage_nodes_ptr, uniq_instance);
            node_degree_vec_.emplace_back(node_degrees);
          }
          hipStreamSynchronize(sample_stream_);
          if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
            // NOTE: shadows the outer final_sage_nodes_ptr (same value);
            // InsertTable's return code is ignored here.
            uint64_t *final_sage_nodes_ptr =
                reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
            InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
          }
          final_sage_nodes_vec_.emplace_back(final_sage_nodes);
          inverse_vec_.emplace_back(inverse);
          uniq_instance_vec_.emplace_back(uniq_instance);
          total_instance_vec_.emplace_back(total_instance);
          sage_batch_num_ += 1;
          // Advance to the next (possibly short/empty) batch.
          total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
                               ? batch_size_
                               : infer_node_end_ - infer_node_start_;
          total_instance *= 2;
        }
        uint64_t h_uniq_node_num = CopyUniqueNodes();
        VLOG(1) << "infer sage_batch_num: " << sage_batch_num_;
      }
    }
  }
  debug_gpu_memory_info(device_id, "DoWalkandSage end");
}
// Releases all per-generator device allocations and the dedup hash table.
// Sage-only buffers are released only when sage_mode_ is set.
// Fix: null out table_ after deleting it so a later delete/use of the stale
// pointer cannot double-free or touch freed memory.
void GraphDataGenerator::clear_gpu_mem() {
  d_len_per_row_.reset();
  d_sample_keys_.reset();
  d_prefix_sum_.reset();
  for (size_t i = 0; i < d_sampleidx2rows_.size(); i++) {
    d_sampleidx2rows_[i].reset();
  }
  delete table_;
  table_ = nullptr;  // avoid dangling pointer / double delete
  if (sage_mode_) {
    d_reindex_table_key_.reset();
    d_reindex_table_value_.reset();
    d_reindex_table_index_.reset();
    d_sorted_keys_.reset();
    d_sorted_idx_.reset();
    d_offset_.reset();
    d_merged_cnts_.reset();
  }
}
// Prepares the next chunk of inference keys: advances the shared per-GPU
// cursor past finished/skipped node types, claims up to infer_table_cap_ keys
// from the current type, and records the [infer_node_start_, infer_node_end_)
// range. In non-sage mode the keys are also copied into host_vec_.
// Returns 0 when all node types are exhausted, 1 otherwise.
int GraphDataGenerator::FillInferBuf() {
  platform::CUDADeviceGuard guard(gpuid_);
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  auto &global_infer_node_type_start =
      gpu_graph_ptr->global_infer_node_type_start_[gpuid_];
  auto &infer_cursor = gpu_graph_ptr->infer_cursor_[thread_id_];
  total_row_ = 0;
  if (infer_cursor < h_device_keys_len_.size()) {
    // Current node type fully consumed: move to the next one.
    if (global_infer_node_type_start[infer_cursor] >=
        h_device_keys_len_[infer_cursor]) {
      infer_cursor++;
      if (infer_cursor >= h_device_keys_len_.size()) {
        return 0;
      }
    }
    // When an allow-list of node-type indices is configured, skip types not
    // in it.
    if (!infer_node_type_index_set_.empty()) {
      while (infer_cursor < h_device_keys_len_.size()) {
        if (infer_node_type_index_set_.find(infer_cursor) ==
            infer_node_type_index_set_.end()) {
          VLOG(2) << "Skip cursor[" << infer_cursor << "]";
          infer_cursor++;
          continue;
        } else {
          VLOG(2) << "Not skip cursor[" << infer_cursor << "]";
          break;
        }
      }
      if (infer_cursor >= h_device_keys_len_.size()) {
        return 0;
      }
    }
    // Claim at most infer_table_cap_ keys from what remains of this type.
    size_t device_key_size = h_device_keys_len_[infer_cursor];
    total_row_ =
        (global_infer_node_type_start[infer_cursor] + infer_table_cap_ <=
         device_key_size)
            ? infer_table_cap_
            : device_key_size - global_infer_node_type_start[infer_cursor];
    uint64_t *d_type_keys =
        reinterpret_cast<uint64_t *>(d_device_keys_[infer_cursor]->ptr());
    if (!sage_mode_) {
      host_vec_.resize(total_row_);
      hipMemcpyAsync(host_vec_.data(),
                     d_type_keys + global_infer_node_type_start[infer_cursor],
                     sizeof(uint64_t) * total_row_,
                     hipMemcpyDeviceToHost,
                     sample_stream_);
      hipStreamSynchronize(sample_stream_);
    }
    VLOG(1) << "cursor: " << infer_cursor
            << " start: " << global_infer_node_type_start[infer_cursor]
            << " num: " << total_row_;
    // Publish this chunk's range and bump the shared start for the next call.
    infer_node_start_ = global_infer_node_type_start[infer_cursor];
    global_infer_node_type_start[infer_cursor] += total_row_;
    infer_node_end_ = global_infer_node_type_start[infer_cursor];
    cursor_ = infer_cursor;
  }
  return 1;
}
// Resets this GPU's walk-sampling state: forgets which node types finished
// and rewinds every node type's start offset to 0.
void GraphDataGenerator::ClearSampleState() {
  auto wrapper = GraphGpuWrapper::GetInstance();
  auto &finished_types = wrapper->finish_node_type_[gpuid_];
  auto &type_start_map = wrapper->node_type_start_[gpuid_];
  finished_types.clear();
  for (auto &type_start : type_start_map) {
    type_start.second = 0;
  }
}
int GraphDataGenerator::FillWalkBuf() {
platform::CUDADeviceGuard guard(gpuid_);
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
////////
uint64_t *h_walk;
uint64_t *h_sample_keys;
int *h_offset2idx;
int *h_len_per_row;
uint64_t *h_prefix_sum;
if (debug_mode_) {
h_walk = new uint64_t[buf_size_];
h_sample_keys = new uint64_t[once_max_sample_keynum];
h_offset2idx = new int[once_max_sample_keynum];
h_len_per_row = new int[once_max_sample_keynum];
h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
}
///////
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
hipMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
uint8_t *walk_ntype = NULL;
if (excluded_train_pair_len_ > 0) {
walk_ntype = reinterpret_cast<uint8_t *>(d_walk_ntype_->ptr());
hipMemsetAsync(walk_ntype, 0, buf_size_ * sizeof(uint8_t), sample_stream_);
}
// hipMemsetAsync(
// len_per_row, 0, once_max_sample_keynum * sizeof(int), sample_stream_);
int sample_times = 0;
int i = 0;
total_row_ = 0;
//
auto &first_node_type = gpu_graph_ptr->first_node_type_;
auto &meta_path = gpu_graph_ptr->meta_path_;
auto &node_type_start = gpu_graph_ptr->node_type_start_[gpuid_];
auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
auto &cursor = gpu_graph_ptr->cursor_[thread_id_];
size_t node_type_len = first_node_type.size();
int remain_size =
buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
int total_samples = 0;
while (i <= remain_size) {
int cur_node_idx = cursor % node_type_len;
int node_type = first_node_type[cur_node_idx];
auto &path = meta_path[cur_node_idx];
size_t start = node_type_start[node_type];
VLOG(2) << "cur_node_idx = " << cur_node_idx
<< " meta_path.size = " << meta_path.size();
// auto node_query_result = gpu_graph_ptr->query_node_list(
// gpuid_, node_type, start, once_sample_startid_len_);
// int tmp_len = node_query_result.actual_sample_size;
VLOG(2) << "choose start type: " << node_type;
int type_index = type_to_index[node_type];
size_t device_key_size = h_device_keys_len_[type_index];
VLOG(2) << "type: " << node_type << " size: " << device_key_size
<< " start: " << start;
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[type_index]->ptr());
int tmp_len = start + once_sample_startid_len_ > device_key_size
? device_key_size - start
: once_sample_startid_len_;
bool update = true;
if (tmp_len == 0) {
finish_node_type.insert(node_type);
if (finish_node_type.size() == node_type_start.size()) {
cursor = 0;
epoch_finish_ = true;
break;
}
cursor += 1;
continue;
}
VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
uint64_t *cur_walk = walk + i;
uint8_t *cur_walk_ntype = NULL;
if (excluded_train_pair_len_ > 0) {
cur_walk_ntype = walk_ntype + i;
}
NeighborSampleQuery q;
q.initialize(gpuid_,
path[0],
(uint64_t)(d_type_keys + start),
walk_degree_,
tmp_len);
auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
int step = 1;
VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
jump_rows_ = sample_res.total_sample_size;
total_samples += sample_res.total_sample_size;
VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
<< " cursor = " << node_type << " cur_node_idx = " << cur_node_idx
<< " jump row: " << jump_rows_;
VLOG(2) << "jump_row: " << jump_rows_;
if (jump_rows_ == 0) {
node_type_start[node_type] = tmp_len + start;
cursor += 1;
continue;
}
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert key stage, table is full";
update = false;
break;
}
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert sample res stage, table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
path[0],
cur_walk,
cur_walk_ntype,
tmp_len,
sample_res,
walk_degree_,
step,
len_per_row);
/////////
if (debug_mode_) {
hipMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
<< " sample_res_len=" << sample_res.total_sample_size;
/////////
step++;
size_t path_len = path.size();
for (; step < walk_len_; step++) {
if (sample_res.total_sample_size == 0) {
VLOG(2) << "sample finish, step=" << step;
break;
}
auto sample_key_mem = sample_res.actual_val_mem;
uint64_t *sample_keys_ptr =
reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
int edge_type_id = path[(step - 1) % path_len];
VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
q.initialize(gpuid_,
edge_type_id,
(uint64_t)sample_keys_ptr,
1,
sample_res.total_sample_size);
int sample_key_len = sample_res.total_sample_size;
sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
total_samples += sample_res.total_sample_size;
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step: " << step << ", table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
edge_type_id,
cur_walk,
cur_walk_ntype,
sample_key_len,
sample_res,
1,
step,
len_per_row);
if (debug_mode_) {
hipMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
<< " sample_res_len=" << sample_res.total_sample_size;
}
//
if (update == true) {
node_type_start[node_type] = tmp_len + start;
i += jump_rows_ * walk_len_;
total_row_ += jump_rows_;
cursor += 1;
sample_times++;
} else {
VLOG(2) << "table is full, not update stat!";
break;
}
}
buf_state_.Reset(total_row_);
int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
thrust::random::default_random_engine engine(shuffle_seed_);
const auto &exec_policy = thrust::hip::par.on(sample_stream_);
thrust::counting_iterator<int> cnt_iter(0);
thrust::shuffle_copy(exec_policy,
cnt_iter,
cnt_iter + total_row_,
thrust::device_pointer_cast(d_random_row),
engine);
hipStreamSynchronize(sample_stream_);
shuffle_seed_ = engine();
if (debug_mode_) {
int *h_random_row = new int[total_row_ + 10];
hipMemcpy(h_random_row,
d_random_row,
total_row_ * sizeof(int),
hipMemcpyDeviceToHost);
for (int xx = 0; xx < total_row_; xx++) {
VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
}
delete[] h_random_row;
delete[] h_walk;
delete[] h_sample_keys;
delete[] h_offset2idx;
delete[] h_len_per_row;
delete[] h_prefix_sum;
}
if (!sage_mode_) {
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
} else {
VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
}
return total_row_ != 0;
}
// Fills the device walk buffer (d_walk_) with random walks that all follow
// the single metapath currently selected in the GPU graph wrapper
// (cur_metapath_), unlike FillWalkBuf which rotates over all first-node
// types.  Side effects: advances cur_metapath_start_[gpuid_], resets
// buf_state_, and writes a shuffled row permutation into d_random_row_.
// Returns nonzero iff at least one walk row was produced.
int GraphDataGenerator::FillWalkBufMultiPath() {
  platform::CUDADeviceGuard guard(gpuid_);
  // Upper bound on start keys sampled in one round.
  size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
  ////////
  // Host mirrors of device buffers; only allocated and used in debug mode.
  uint64_t *h_walk;
  uint64_t *h_sample_keys;
  int *h_offset2idx;
  int *h_len_per_row;
  uint64_t *h_prefix_sum;
  if (debug_mode_) {
    h_walk = new uint64_t[buf_size_];
    h_sample_keys = new uint64_t[once_max_sample_keynum];
    h_offset2idx = new int[once_max_sample_keynum];
    h_len_per_row = new int[once_max_sample_keynum];
    h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
  }
  ///////
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
  uint8_t *walk_ntype = NULL;
  if (excluded_train_pair_len_ > 0) {
    // Per-slot node-type tags, used to filter excluded training pairs.
    walk_ntype = reinterpret_cast<uint8_t *>(d_walk_ntype_->ptr());
  }
  int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
  uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
  hipMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
  int sample_times = 0;
  int i = 0;  // current write offset (in elements) into the walk buffer
  total_row_ = 0;
  //
  // References into shared wrapper state.  NOTE(review): meta_path,
  // finish_node_type, type_to_index and node_type_len are bound here but not
  // used in this multi-path variant.
  auto &first_node_type = gpu_graph_ptr->first_node_type_;
  auto &cur_metapath = gpu_graph_ptr->cur_metapath_;
  auto &meta_path = gpu_graph_ptr->meta_path_;
  auto &path = gpu_graph_ptr->cur_parse_metapath_;
  auto &cur_metapath_start = gpu_graph_ptr->cur_metapath_start_[gpuid_];
  auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
  auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
  size_t node_type_len = first_node_type.size();
  // The metapath string has the form "a2b2c..."; its first segment names the
  // node type walks start from.
  std::string first_node =
      paddle::string::split_string<std::string>(cur_metapath, "2")[0];
  auto it = gpu_graph_ptr->node_to_id.find(first_node);
  auto node_type = it->second;
  // Stop refilling once another full round of walks would not fit.
  int remain_size =
      buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
  int total_samples = 0;
  while (i <= remain_size) {
    size_t start = cur_metapath_start;
    size_t device_key_size = h_train_metapath_keys_len_;
    VLOG(2) << "type: " << node_type << " size: " << device_key_size
            << " start: " << start;
    uint64_t *d_type_keys =
        reinterpret_cast<uint64_t *>(d_train_metapath_keys_->ptr());
    // Clamp the start-key batch to what remains of the key list.
    int tmp_len = start + once_sample_startid_len_ > device_key_size
                      ? device_key_size - start
                      : once_sample_startid_len_;
    bool update = true;
    if (tmp_len == 0) {
      // All start keys for this metapath are consumed.
      break;
    }
    VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
    uint64_t *cur_walk = walk + i;
    uint8_t *cur_walk_ntype = NULL;
    if (excluded_train_pair_len_ > 0) {
      cur_walk_ntype = walk_ntype + i;
    }
    // First hop: sample walk_degree_ neighbors per start key along the first
    // edge type of the metapath.
    NeighborSampleQuery q;
    q.initialize(gpuid_,
                 path[0],
                 (uint64_t)(d_type_keys + start),
                 walk_degree_,
                 tmp_len);
    auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
    int step = 1;
    VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
    jump_rows_ = sample_res.total_sample_size;
    total_samples += sample_res.total_sample_size;
    VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
            << "jump row: " << jump_rows_;
    if (jump_rows_ == 0) {
      // No neighbors for this batch of keys: skip past them and retry.
      cur_metapath_start = tmp_len + start;
      continue;
    }
    if (!sage_mode_) {
      if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
        // Record unique node ids; abort the round if the dedup table fills.
        if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
          VLOG(2) << "in step 0, insert key stage, table is full";
          update = false;
          break;
        }
        if (InsertTable(sample_res.actual_val,
                        sample_res.total_sample_size,
                        d_uniq_node_num_) != 0) {
          VLOG(2) << "in step 0, insert sample res stage, table is full";
          update = false;
          break;
        }
      }
    }
    FillOneStep(d_type_keys + start,
                path[0],
                cur_walk,
                cur_walk_ntype,
                tmp_len,
                sample_res,
                walk_degree_,
                step,
                len_per_row);
    /////////
    if (debug_mode_) {
      hipMemcpy(
          h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
      for (int xx = 0; xx < buf_size_; xx++) {
        VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
      }
    }
    VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
            << " sample_res_len=" << sample_res.total_sample_size;
    /////////
    step++;
    size_t path_len = path.size();
    // Remaining hops: one neighbor per frontier node, cycling through the
    // metapath's edge types until walk_len_ is reached or sampling dries up.
    for (; step < walk_len_; step++) {
      if (sample_res.total_sample_size == 0) {
        VLOG(2) << "sample finish, step=" << step;
        break;
      }
      // Previous hop's results become this hop's frontier keys.
      auto sample_key_mem = sample_res.actual_val_mem;
      uint64_t *sample_keys_ptr =
          reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
      int edge_type_id = path[(step - 1) % path_len];
      VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
      q.initialize(gpuid_,
                   edge_type_id,
                   (uint64_t)sample_keys_ptr,
                   1,
                   sample_res.total_sample_size);
      int sample_key_len = sample_res.total_sample_size;
      sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
      total_samples += sample_res.total_sample_size;
      if (!sage_mode_) {
        if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
          if (InsertTable(sample_res.actual_val,
                          sample_res.total_sample_size,
                          d_uniq_node_num_) != 0) {
            VLOG(2) << "in step: " << step << ", table is full";
            update = false;
            break;
          }
        }
      }
      FillOneStep(d_type_keys + start,
                  edge_type_id,
                  cur_walk,
                  cur_walk_ntype,
                  sample_key_len,
                  sample_res,
                  1,
                  step,
                  len_per_row);
      if (debug_mode_) {
        hipMemcpy(
            h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
        for (int xx = 0; xx < buf_size_; xx++) {
          VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
        }
      }
      VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
              << " sample_res_len=" << sample_res.total_sample_size;
    }
    //
    if (update == true) {
      // Commit this round: advance the key cursor and account for the rows
      // just written into the walk buffer.
      cur_metapath_start = tmp_len + start;
      i += jump_rows_ * walk_len_;
      total_row_ += jump_rows_;
      sample_times++;
    } else {
      VLOG(2) << "table is full, not update stat!";
      break;
    }
  }
  buf_state_.Reset(total_row_);
  // Write a random permutation of row indices [0, total_row_) to device, so
  // batches later consume walk rows in shuffled order.
  int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
  thrust::random::default_random_engine engine(shuffle_seed_);
  const auto &exec_policy = thrust::hip::par.on(sample_stream_);
  thrust::counting_iterator<int> cnt_iter(0);
  thrust::shuffle_copy(exec_policy,
                       cnt_iter,
                       cnt_iter + total_row_,
                       thrust::device_pointer_cast(d_random_row),
                       engine);
  hipStreamSynchronize(sample_stream_);
  // Advance the seed so the next fill produces a different permutation.
  shuffle_seed_ = engine();
  if (debug_mode_) {
    int *h_random_row = new int[total_row_ + 10];
    hipMemcpy(h_random_row,
              d_random_row,
              total_row_ * sizeof(int),
              hipMemcpyDeviceToHost);
    for (int xx = 0; xx < total_row_; xx++) {
      VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
    }
    delete[] h_random_row;
    delete[] h_walk;
    delete[] h_sample_keys;
    delete[] h_offset2idx;
    delete[] h_len_per_row;
    delete[] h_prefix_sum;
  }
  if (!sage_mode_) {
    uint64_t h_uniq_node_num = CopyUniqueNodes();
    VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
            << ", d_walk_offset:" << i << ", total_rows:" << total_row_
            << ", h_uniq_node_num:" << h_uniq_node_num
            << ", total_samples:" << total_samples;
  } else {
    VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
            << ", d_walk_offset:" << i << ", total_rows:" << total_row_
            << ", total_samples:" << total_samples;
  }
  return total_row_ != 0;
}
// Stores the caller-provided list of output tensors on the generator.
void GraphDataGenerator::SetFeedVec(std::vector<phi::DenseTensor *> feed_vec) {
  // Exchange contents with the by-value parameter; feed_vec_ ends up holding
  // exactly the list the caller passed in.
  feed_vec_.swap(feed_vec);
}
// Allocates all per-thread GPU resources used for graph walking/sampling:
// the node-dedup hash table, walk buffers, per-round sampling scratch,
// the shuffled-row buffer, instance-pair buffers and (in sage mode) the
// reindex tables.  `feed_vec` is only inspected here to derive slot_num_;
// the tensors themselves are installed later via SetFeedVec.
void GraphDataGenerator::AllocResource(
    int thread_id, std::vector<phi::DenseTensor *> feed_vec) {
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  gpuid_ = gpu_graph_ptr->device_id_mapping[thread_id];
  thread_id_ = thread_id;
  place_ = platform::CUDAPlace(gpuid_);
  debug_gpu_memory_info(gpuid_, "AllocResource start");
  platform::CUDADeviceGuard guard(gpuid_);
  if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
    // Dedup hash table sized as capacity / load factor; training and
    // inference use different capacities.
    if (gpu_graph_training_) {
      table_ = new HashTable<uint64_t, uint64_t>(
          train_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
    } else {
      table_ = new HashTable<uint64_t, uint64_t>(
          infer_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
    }
  }
  VLOG(1) << "AllocResource gpuid " << gpuid_
          << " feed_vec.size: " << feed_vec.size()
          << " table cap: " << train_table_cap_;
  sample_stream_ = gpu_graph_ptr->get_local_stream(gpuid_);
  train_stream_ = dynamic_cast<phi::GPUContext *>(
                      platform::DeviceContextPool::Instance().Get(place_))
                      ->stream();
  // feed_vec_ = feed_vec;
  // Derive the slot count from the feed layout.  NOTE(review): the constants
  // (3, or 4 + 5 per sample level in sage mode) encode the number of
  // non-slot tensors in the feed — confirm against the feed definition.
  if (!sage_mode_) {
    slot_num_ = (feed_vec.size() - 3) / 2;
  } else {
    slot_num_ = (feed_vec.size() - 4 - samples_.size() * 5) / 2;
  }
  // infer_node_type_start_ = std::vector<int>(h_device_keys_.size(), 0);
  // for (size_t i = 0; i < h_device_keys_.size(); i++) {
  //   for (size_t j = 0; j < h_device_keys_[i]->size(); j++) {
  //     VLOG(3) << "h_device_keys_[" << i << "][" << j
  //             << "] = " << (*(h_device_keys_[i]))[j];
  //   }
  //   auto buf = memory::AllocShared(
  //       place_, h_device_keys_[i]->size() * sizeof(uint64_t));
  //   d_device_keys_.push_back(buf);
  //   CUDA_CHECK(hipMemcpyAsync(buf->ptr(),
  //                             h_device_keys_[i]->data(),
  //                             h_device_keys_[i]->size() * sizeof(uint64_t),
  //                             hipMemcpyHostToDevice,
  //                             stream_));
  // }
  if (gpu_graph_training_ && FLAGS_graph_metapath_split_opt) {
    // Metapath-split training: a single key list per thread.
    d_train_metapath_keys_ =
        gpu_graph_ptr->d_graph_train_total_keys_[thread_id];
    h_train_metapath_keys_len_ =
        gpu_graph_ptr->h_graph_train_keys_len_[thread_id];
    VLOG(2) << "h train metapaths key len: " << h_train_metapath_keys_len_;
  } else {
    // Otherwise keep one device key list (and its length) per node type.
    auto &d_graph_all_type_keys = gpu_graph_ptr->d_graph_all_type_total_keys_;
    auto &h_graph_all_type_keys_len = gpu_graph_ptr->h_graph_all_type_keys_len_;
    for (size_t i = 0; i < d_graph_all_type_keys.size(); i++) {
      d_device_keys_.push_back(d_graph_all_type_keys[i][thread_id]);
      h_device_keys_len_.push_back(h_graph_all_type_keys_len[i][thread_id]);
    }
    VLOG(2) << "h_device_keys size: " << h_device_keys_len_.size();
  }
  // Scratch buffers sized for one sampling round.
  size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
  d_prefix_sum_ = memory::AllocShared(
      place_,
      (once_max_sample_keynum + 1) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *d_prefix_sum_ptr = reinterpret_cast<int *>(d_prefix_sum_->ptr());
  hipMemsetAsync(d_prefix_sum_ptr,
                 0,
                 (once_max_sample_keynum + 1) * sizeof(int),
                 sample_stream_);
  cursor_ = 0;
  jump_rows_ = 0;
  d_uniq_node_num_ = memory::AllocShared(
      place_,
      sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  hipMemsetAsync(d_uniq_node_num_->ptr(), 0, sizeof(uint64_t), sample_stream_);
  // Main walk buffer, zero-initialized on the sample stream.
  d_walk_ = memory::AllocShared(
      place_,
      buf_size_ * sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  hipMemsetAsync(
      d_walk_->ptr(), 0, buf_size_ * sizeof(uint64_t), sample_stream_);
  excluded_train_pair_len_ = gpu_graph_ptr->excluded_train_pair_.size();
  if (excluded_train_pair_len_ > 0) {
    // Copy the excluded-pair table to device and allocate the per-slot
    // node-type buffer that pair filtering needs.
    d_excluded_train_pair_ = memory::AllocShared(
        place_,
        excluded_train_pair_len_ * sizeof(uint8_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    CUDA_CHECK(hipMemcpyAsync(d_excluded_train_pair_->ptr(),
                              gpu_graph_ptr->excluded_train_pair_.data(),
                              excluded_train_pair_len_ * sizeof(uint8_t),
                              hipMemcpyHostToDevice,
                              sample_stream_));
    d_walk_ntype_ = memory::AllocShared(
        place_,
        buf_size_ * sizeof(uint8_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    hipMemsetAsync(
        d_walk_ntype_->ptr(), 0, buf_size_ * sizeof(uint8_t), sample_stream_);
  }
  d_sample_keys_ = memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  // Two sampleidx->row buffers, ping-ponged via cur_sampleidx2row_.
  d_sampleidx2rows_.push_back(memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
  d_sampleidx2rows_.push_back(memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
  cur_sampleidx2row_ = 0;
  d_len_per_row_ = memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  // Window offsets -window_..-1 and 1..window_ (zero excluded).
  for (int i = -window_; i < 0; i++) {
    window_step_.push_back(i);
  }
  for (int i = 0; i < window_; i++) {
    window_step_.push_back(i + 1);
  }
  buf_state_.Init(batch_size_, walk_len_, &window_step_);
  d_random_row_ = memory::AllocShared(
      place_,
      (once_sample_startid_len_ * walk_degree_ * repeat_time_) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  shuffle_seed_ = 0;
  ins_buf_pair_len_ = 0;
  if (!sage_mode_) {
    d_ins_buf_ =
        memory::AllocShared(place_, (batch_size_ * 2 * 2) * sizeof(uint64_t));
    d_pair_num_ = memory::AllocShared(place_, sizeof(int));
  } else {
    // Sage mode allocates on the sample stream (stream-ordered semantics).
    d_ins_buf_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_pair_num_ = memory::AllocShared(
        place_,
        sizeof(int),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  }
  d_slot_tensor_ptr_ =
      memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
  d_slot_lod_tensor_ptr_ =
      memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
  if (sage_mode_) {
    // Worst-case unique-node count across all hops:
    // batch*2 * prod(samples_[i] * edge_to_id_len_ + 1).
    reindex_table_size_ = batch_size_ * 2;
    // get hashtable size
    for (int i = 0; i < samples_.size(); i++) {
      reindex_table_size_ *= (samples_[i] * edge_to_id_len_ + 1);
    }
    // Round the table size up to a power of two.
    int64_t next_pow2 =
        1 << static_cast<size_t>(1 + std::log2(reindex_table_size_ >> 1));
    reindex_table_size_ = next_pow2 << 1;
    d_reindex_table_key_ = memory::AllocShared(
        place_,
        reindex_table_size_ * sizeof(int64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_reindex_table_value_ = memory::AllocShared(
        place_,
        reindex_table_size_ * sizeof(int),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_reindex_table_index_ = memory::AllocShared(
        place_,
        reindex_table_size_ * sizeof(int),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    edge_type_graph_ =
        gpu_graph_ptr->get_edge_type_graph(gpuid_, edge_to_id_len_);
    // Dedup scratch buffers sized for batch*2*2 candidate keys.
    d_sorted_keys_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_sorted_idx_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint32_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_offset_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint32_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_merged_cnts_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint32_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  }
  // parse infer_node_type
  auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
  if (!gpu_graph_training_) {
    // infer_node_type_ is a ";"-separated list of node type names; map each
    // to its type index and remember the set for inference iteration.
    auto node_types =
        paddle::string::split_string<std::string>(infer_node_type_, ";");
    auto node_to_id = gpu_graph_ptr->node_to_id;
    for (auto &type : node_types) {
      auto iter = node_to_id.find(type);
      PADDLE_ENFORCE_NE(
          iter,
          node_to_id.end(),
          platform::errors::NotFound("(%s) is not found in node_to_id.", type));
      int node_type = iter->second;
      int type_index = type_to_index[node_type];
      VLOG(2) << "add node[" << type
              << "] into infer_node_type, type_index(cursor)[" << type_index
              << "]";
      infer_node_type_index_set_.insert(type_index);
    }
    VLOG(2) << "infer_node_type_index_set_num: "
            << infer_node_type_index_set_.size();
  }
  // Ensure all async memsets/copies above have completed before returning.
  hipStreamSynchronize(sample_stream_);
  debug_gpu_memory_info(gpuid_, "AllocResource end");
}
// Allocates the per-batch feature-size scratch buffers used during training.
// No-op when the feed has no slot features.
void GraphDataGenerator::AllocTrainResource(int thread_id) {
  if (slot_num_ <= 0) {
    return;
  }
  platform::CUDADeviceGuard guard(gpuid_);
  if (sage_mode_) {
    // Left unset in sage mode; callers see NULL buffers.
    d_feature_size_list_buf_ = NULL;
    d_feature_size_prefixsum_buf_ = NULL;
  } else {
    const size_t num_keys = batch_size_ * 2;
    d_feature_size_list_buf_ =
        memory::AllocShared(place_, num_keys * sizeof(uint32_t));
    // Prefix-sum buffer needs one extra trailing element.
    d_feature_size_prefixsum_buf_ =
        memory::AllocShared(place_, (num_keys + 1) * sizeof(uint32_t));
  }
}
// Reads the graph-related fields of the DataFeedDesc proto into member
// configuration, initializes the GPU graph wrapper's metapath configuration,
// and parses the per-hop sample sizes.
void GraphDataGenerator::SetConfig(
    const paddle::framework::DataFeedDesc &data_feed_desc) {
  auto graph_config = data_feed_desc.graph_config();
  walk_degree_ = graph_config.walk_degree();
  walk_len_ = graph_config.walk_len();
  window_ = graph_config.window();
  once_sample_startid_len_ = graph_config.once_sample_startid_len();
  debug_mode_ = graph_config.debug_mode();
  gpu_graph_training_ = graph_config.gpu_graph_training();
  // In normal training the batch size is tied to the per-round start-key
  // count; debug and inference use the configured batch size directly.
  if (debug_mode_ || !gpu_graph_training_) {
    batch_size_ = graph_config.batch_size();
  } else {
    batch_size_ = once_sample_startid_len_;
  }
  repeat_time_ = graph_config.sample_times_one_chunk();
  // Walk buffer holds repeat_time_ rounds of (start keys * degree) walks of
  // walk_len_ nodes each.
  buf_size_ =
      once_sample_startid_len_ * walk_len_ * walk_degree_ * repeat_time_;
  train_table_cap_ = graph_config.train_table_cap();
  infer_table_cap_ = graph_config.infer_table_cap();
  get_degree_ = graph_config.get_degree();
  epoch_finish_ = false;
  VLOG(1) << "Confirm GraphConfig, walk_degree : " << walk_degree_
          << ", walk_len : " << walk_len_ << ", window : " << window_
          << ", once_sample_startid_len : " << once_sample_startid_len_
          << ", sample_times_one_chunk : " << repeat_time_
          << ", batch_size: " << batch_size_
          << ", train_table_cap: " << train_table_cap_
          << ", infer_table_cap: " << infer_table_cap_;
  std::string first_node_type = graph_config.first_node_type();
  std::string meta_path = graph_config.meta_path();
  sage_mode_ = graph_config.sage_mode();
  std::string str_samples = graph_config.samples();
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  debug_gpu_memory_info("init_conf start");
  // Hand the metapath configuration to the shared GPU graph wrapper.
  gpu_graph_ptr->init_conf(
      first_node_type, meta_path, graph_config.excluded_train_pair());
  debug_gpu_memory_info("init_conf end");
  auto edge_to_id = gpu_graph_ptr->edge_to_id;
  edge_to_id_len_ = edge_to_id.size();
  sage_batch_count_ = 0;
  // samples is a ";"-separated list of per-hop neighbor counts, e.g. "10;5".
  auto samples = paddle::string::split_string<std::string>(str_samples, ";");
  for (size_t i = 0; i < samples.size(); i++) {
    int sample_size = std::stoi(samples[i]);
    samples_.emplace_back(sample_size);
  }
  copy_unique_len_ = 0;
  if (!gpu_graph_training_) {
    infer_node_type_ = graph_config.infer_node_type();
  }
}
#endif
// Dumps a 1/dump_rate fraction of the walk buffer to `dump_path` as text,
// one walk per line, node ids joined by '-'.  Linux-only (uses
// fwrite_unlocked); a no-op elsewhere.
void GraphDataGenerator::DumpWalkPath(std::string dump_path, size_t dump_rate) {
#ifdef _LINUX
  PADDLE_ENFORCE_LT(
      dump_rate,
      10000000,
      platform::errors::InvalidArgument(
          "dump_rate can't be large than 10000000. Please check the dump "
          "rate[1, 10000000]"));
  // NOTE(review): GT(dump_rate, 1) rejects dump_rate == 1 although the
  // message advertises the range [1, 10000000] — confirm intended bound.
  PADDLE_ENFORCE_GT(dump_rate,
                    1,
                    platform::errors::InvalidArgument(
                        "dump_rate can't be less than 1. Please check "
                        "the dump rate[1, 10000000]"));
  int err_no = 0;
  std::shared_ptr<FILE> fp = fs_open_append_write(dump_path, &err_no, "");
  // Copy the whole walk buffer to host before formatting.
  uint64_t *h_walk = new uint64_t[buf_size_];
  uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
  hipMemcpy(
      h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
  VLOG(1) << "DumpWalkPath all buf_size_:" << buf_size_;
  std::string ss = "";
  size_t write_count = 0;
  // Only the first buf_size_ / dump_rate elements are dumped; each line
  // covers one walk of walk_len_ nodes.
  for (int xx = 0; xx < buf_size_ / dump_rate; xx += walk_len_) {
    ss = "";
    for (int yy = 0; yy < walk_len_; yy++) {
      ss += std::to_string(h_walk[xx + yy]) + "-";
    }
    write_count = fwrite_unlocked(ss.data(), 1, ss.length(), fp.get());
    if (write_count != ss.length()) {
      VLOG(1) << "dump walk path" << ss << " failed";
    }
    write_count = fwrite_unlocked("\n", 1, 1, fp.get());
  }
  // Fix: the original leaked this host buffer on every call.
  delete[] h_walk;
#endif
}
} // namespace framework
} // namespace paddle
#endif
| a1409c7e483a5e6c6930161acf6925f800db1efe.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif
#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
#include "paddle/fluid/framework/data_feed.h"
#include <thrust/device_ptr.h>
#include <thrust/random.h>
#include <thrust/shuffle.h>
#include <sstream>
#include "cub/cub.cuh"
#if defined(PADDLE_WITH_PSCORE) && defined(PADDLE_WITH_GPU_GRAPH)
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_node.h"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
#endif
#include "paddle/fluid/framework/fleet/heter_ps/hashtable.h"
#include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
#include "paddle/fluid/framework/io/fs.h"
#include "paddle/phi/kernels/gpu/graph_reindex_funcs.h"
#include "paddle/phi/kernels/graph_reindex_kernel.h"
DECLARE_bool(enable_opt_get_features);
DECLARE_bool(graph_metapath_split_opt);
DECLARE_int32(gpugraph_storage_mode);
DECLARE_double(gpugraph_hbm_table_load_factor);
namespace paddle {
namespace framework {
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
#define DEBUG_STATE(state) \
VLOG(2) << "left: " << state->left << " right: " << state->right \
<< " central_word: " << state->central_word \
<< " step: " << state->step << " cursor: " << state->cursor \
<< " len: " << state->len << " row_num: " << state->row_num; \
// CUDA: use 512 threads per block
const int CUDA_NUM_THREADS = 512;
// Ceiling division: smallest grid size whose threads cover N work items.
inline int GET_BLOCKS(const int N) {
  const int blocks = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  return blocks;
}
// Writes the identity mapping idx[i] = i for every i in [0, len).
// One thread per element; the launch grid must cover len.
template <typename T>
__global__ void fill_idx(T *idx, size_t len) {
  const size_t pos = blockIdx.x * blockDim.x + threadIdx.x;
  if (pos >= len) {
    return;  // tail threads beyond len do nothing
  }
  idx[pos] = pos;
}
/**
 * @brief Ascending radix sort of (key, value) pairs on device via
 *        cub::DeviceRadixSort::SortPairs, over the full key bit range.
 *
 * @param len        number of pairs
 * @param in_keys    device input keys
 * @param out_keys   device output: keys in ascending order
 * @param in_vals    device input values
 * @param out_vals   device output: values reordered with their keys
 * @param stream     CUDA stream all work is enqueued on
 * @param d_buf_     reusable scratch allocation, (re)grown on demand to the
 *                   temp-storage size CUB reports
 * @param place_     place used to allocate the scratch buffer
 */
template <typename K, typename V>
void cub_sort_pairs(int len,
                    const K *in_keys,
                    K *out_keys,
                    const V *in_vals,
                    V *out_vals,
                    cudaStream_t stream,
                    std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                    const paddle::platform::Place &place_) {
  size_t temp_storage_bytes = 0;
  // First call with NULL storage only queries temp_storage_bytes.
  CUDA_CHECK(cub::DeviceRadixSort::SortPairs(NULL,
                                             temp_storage_bytes,
                                             in_keys,
                                             out_keys,
                                             in_vals,
                                             out_vals,
                                             len,
                                             0,
                                             8 * sizeof(K),
                                             stream,
                                             false));
  if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
    d_buf_ = memory::AllocShared(
        place_,
        temp_storage_bytes,
        phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  }
  // Second call performs the actual sort using the scratch buffer.
  CUDA_CHECK(cub::DeviceRadixSort::SortPairs(d_buf_->ptr(),
                                             temp_storage_bytes,
                                             in_keys,
                                             out_keys,
                                             in_vals,
                                             out_vals,
                                             len,
                                             0,
                                             8 * sizeof(K),
                                             stream,
                                             false));
}
/**
 * @Brief Run-length encode a (typically sorted) key array via
 *        cub::DeviceRunLengthEncode::Encode.
 *
 * @param N          number of input keys
 * @param in_keys    device input keys
 * @param out_keys   device output: one entry per run (the unique keys when
 *                   the input is sorted)
 * @param out_sizes  device output: length of each run
 * @param d_out_len  device output: number of runs produced
 * @param stream     CUDA stream all work is enqueued on
 * @param d_buf_     reusable scratch allocation, (re)grown on demand
 * @param place_     place used to allocate the scratch buffer
 */
template <typename K, typename V, typename TNum>
void cub_runlength_encode(int N,
                          const K *in_keys,
                          K *out_keys,
                          V *out_sizes,
                          TNum *d_out_len,
                          cudaStream_t stream,
                          std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                          const paddle::platform::Place &place_) {
  size_t temp_storage_bytes = 0;
  // Size query pass (NULL storage pointer).
  CUDA_CHECK(cub::DeviceRunLengthEncode::Encode(NULL,
                                                temp_storage_bytes,
                                                in_keys,
                                                out_keys,
                                                out_sizes,
                                                d_out_len,
                                                N,
                                                stream));
  if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
    d_buf_ = memory::AllocShared(
        place_,
        temp_storage_bytes,
        phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  }
  // Actual encode pass.
  CUDA_CHECK(cub::DeviceRunLengthEncode::Encode(d_buf_->ptr(),
                                                temp_storage_bytes,
                                                in_keys,
                                                out_keys,
                                                out_sizes,
                                                d_out_len,
                                                N,
                                                stream));
}
/**
 * @brief Exclusive prefix sum of N elements via
 *        cub::DeviceScan::ExclusiveSum (out[0] = 0).
 *
 * @param N       number of elements
 * @param in      device input array
 * @param out     device output array of exclusive prefix sums
 * @param stream  CUDA stream all work is enqueued on
 * @param d_buf_  reusable scratch allocation, (re)grown on demand
 * @param place_  place used to allocate the scratch buffer
 */
template <typename K>
void cub_exclusivesum(int N,
                      const K *in,
                      K *out,
                      cudaStream_t stream,
                      std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                      const paddle::platform::Place &place_) {
  size_t temp_storage_bytes = 0;
  // Size query pass (NULL storage pointer).
  CUDA_CHECK(cub::DeviceScan::ExclusiveSum(
      NULL, temp_storage_bytes, in, out, N, stream));
  if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
    d_buf_ = memory::AllocShared(
        place_,
        temp_storage_bytes,
        phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  }
  // Actual scan pass.
  CUDA_CHECK(cub::DeviceScan::ExclusiveSum(
      d_buf_->ptr(), temp_storage_bytes, in, out, N, stream));
}
// Builds the inverse (restore) mapping after dedup: for every original
// position p whose sorted index lies in unique run i, sets
// d_restore_idx[p] = i.  One thread per unique key (N = number of uniques);
// each thread walks its whole run, so this favors low duplication.
template <typename T>
__global__ void kernel_fill_restore_idx(size_t N,
                                        const T *d_sorted_idx,
                                        const T *d_offset,
                                        const T *d_merged_cnts,
                                        T *d_restore_idx) {
  CUDA_KERNEL_LOOP(i, N) {
    const T &off = d_offset[i];       // start of run i in the sorted order
    const T &num = d_merged_cnts[i];  // length of run i
    for (size_t k = 0; k < num; k++) {
      d_restore_idx[d_sorted_idx[off + k]] = i;
    }
  }
}
// Same output as kernel_fill_restore_idx, but with one thread per ORIGINAL
// element (N = total elements): each thread binary-searches d_offset to find
// which unique run its sorted position falls into.  Used by the caller when
// duplication is high (few uniques, long runs), where per-run iteration
// would serialize badly.
template <typename T>
__global__ void kernel_fill_restore_idx_by_search(size_t N,
                                                  const T *d_sorted_idx,
                                                  size_t merge_num,
                                                  const T *d_offset,
                                                  T *d_restore_idx) {
  CUDA_KERNEL_LOOP(i, N) {
    // Fast path: positions before d_offset[1] belong to unique run 0.
    if (i < d_offset[1]) {
      d_restore_idx[d_sorted_idx[i]] = 0;
      continue;
    }
    // Binary search for the smallest run index `low` with i < d_offset[low+1].
    int high = merge_num - 1;
    int low = 1;
    while (low < high) {
      int mid = (low + high) / 2;
      if (i < d_offset[mid + 1]) {
        high = mid;
      } else {
        low = mid + 1;
      }
    }
    d_restore_idx[d_sorted_idx[i]] = low;
  }
}
// Deduplicates `d_keys` and builds the inverse index.  Pipeline:
//   1. fill d_index_in with identity indices,
//   2. radix-sort (key, index) pairs -> d_sorted_keys / d_sorted_idx,
//   3. run-length encode -> d_merged_keys (uniques) / d_merged_cnts,
//   4. exclusive-scan counts -> d_offset (start of each run),
//   5. scatter run indices back -> d_restore_idx so that
//      d_merged_keys[d_restore_idx[p]] == d_keys[p].
// Returns the number of unique keys.  All work runs on `stream`; the
// function synchronizes the stream before returning.
int dedup_keys_and_fillidx(int total_nodes_num,
                           const uint64_t *d_keys,
                           uint64_t *d_merged_keys,  // output: unique keys
                           uint64_t *d_sorted_keys,  // output: sorted keys
                           uint32_t *d_restore_idx,  // output: inverse index
                           uint32_t *d_sorted_idx,   // output: sort permutation
                           uint32_t *d_offset,       // output: run offsets
                           uint32_t *d_merged_cnts,  // output: run lengths
                           cudaStream_t stream,
                           std::shared_ptr<phi::Allocation> &d_buf_,  // NOLINT
                           const paddle::platform::Place &place_) {
  int merged_size = 0;  // Final num
  // One allocation holds the identity indices plus, in its last slot, the
  // device-side unique count written by the encode step.
  auto d_index_in =
      memory::Alloc(place_,
                    sizeof(uint32_t) * (total_nodes_num + 1),
                    phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
  uint32_t *d_index_in_ptr = reinterpret_cast<uint32_t *>(d_index_in->ptr());
  int *d_merged_size =
      reinterpret_cast<int *>(&d_index_in_ptr[total_nodes_num]);
  fill_idx<<<GET_BLOCKS(total_nodes_num), CUDA_NUM_THREADS, 0, stream>>>(
      d_index_in_ptr, total_nodes_num);
  cub_sort_pairs(total_nodes_num,
                 d_keys,
                 d_sorted_keys,
                 d_index_in_ptr,
                 d_sorted_idx,
                 stream,
                 d_buf_,
                 place_);
  cub_runlength_encode(total_nodes_num,
                       d_sorted_keys,
                       d_merged_keys,
                       d_merged_cnts,
                       d_merged_size,
                       stream,
                       d_buf_,
                       place_);
  // Bring the unique count to host; required to size the next steps.
  CUDA_CHECK(cudaMemcpyAsync(&merged_size,
                             d_merged_size,
                             sizeof(int),
                             cudaMemcpyDeviceToHost,
                             stream));
  CUDA_CHECK(cudaStreamSynchronize(stream));
  cub_exclusivesum(
      merged_size, d_merged_cnts, d_offset, stream, d_buf_, place_);
  // Pick the restore-index strategy by duplication rate: per-run scatter for
  // low duplication, binary search per element for high duplication.
  if (total_nodes_num < merged_size * 2) {
    kernel_fill_restore_idx<<<GET_BLOCKS(merged_size),
                              CUDA_NUM_THREADS,
                              0,
                              stream>>>(
        merged_size, d_sorted_idx, d_offset, d_merged_cnts, d_restore_idx);
  } else {
    // used mid search fill idx when high dedup rate
    kernel_fill_restore_idx_by_search<<<GET_BLOCKS(total_nodes_num),
                                        CUDA_NUM_THREADS,
                                        0,
                                        stream>>>(
        total_nodes_num, d_sorted_idx, merged_size, d_offset, d_restore_idx);
  }
  CUDA_CHECK(cudaStreamSynchronize(stream));
  return merged_size;
}
// fill slot values
// Computes, for each used slot, the per-instance exclusive prefix offsets of
// its values: slot_value_offsets is laid out as used_slot_num rows of
// (ins_num + 1) entries, row s starting with 0 and accumulating the value
// count of each instance for slot s.  One thread per slot; each thread walks
// all instances serially.
__global__ void FillSlotValueOffsetKernel(const int ins_num,
                                          const int used_slot_num,
                                          size_t *slot_value_offsets,
                                          const int *uint64_offsets,
                                          const int uint64_slot_size,
                                          const int *float_offsets,
                                          const int float_slot_size,
                                          const UsedSlotGpuType *used_slots) {
  int col_num = ins_num + 1;
  int uint64_cols = uint64_slot_size + 1;
  int float_cols = float_slot_size + 1;
  CUDA_KERNEL_LOOP(slot_idx, used_slot_num) {
    int value_off = slot_idx * col_num;  // start of this slot's offset row
    slot_value_offsets[value_off] = 0;
    auto &info = used_slots[slot_idx];
    if (info.is_uint64_value) {
      for (int k = 0; k < ins_num; ++k) {
        // Per-instance value count = difference of consecutive raw offsets.
        int pos = k * uint64_cols + info.slot_value_idx;
        int num = uint64_offsets[pos + 1] - uint64_offsets[pos];
        PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
        slot_value_offsets[value_off + k + 1] =
            slot_value_offsets[value_off + k] + num;
      }
    } else {
      for (int k = 0; k < ins_num; ++k) {
        int pos = k * float_cols + info.slot_value_idx;
        int num = float_offsets[pos + 1] - float_offsets[pos];
        PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
        slot_value_offsets[value_off + k + 1] =
            slot_value_offsets[value_off + k] + num;
      }
    }
  }
}
// Host wrapper for FillSlotValueOffsetKernel: launches one thread per used
// slot on `stream` and blocks until the offsets are computed.  See the
// kernel for the slot_value_offsets layout.
void SlotRecordInMemoryDataFeed::FillSlotValueOffset(
    const int ins_num,
    const int used_slot_num,
    size_t *slot_value_offsets,
    const int *uint64_offsets,
    const int uint64_slot_size,
    const int *float_offsets,
    const int float_slot_size,
    const UsedSlotGpuType *used_slots,
    cudaStream_t stream) {
  FillSlotValueOffsetKernel<<<GET_BLOCKS(used_slot_num),
                              CUDA_NUM_THREADS,
                              0,
                              stream>>>(ins_num,
                                        used_slot_num,
                                        slot_value_offsets,
                                        uint64_offsets,
                                        uint64_slot_size,
                                        float_offsets,
                                        float_slot_size,
                                        used_slots);
  // Surface launch-configuration errors and async kernel faults here instead
  // of at some unrelated later CUDA call (consistent with the CUDA_CHECK
  // usage elsewhere in this file).
  CUDA_CHECK(cudaGetLastError());
  CUDA_CHECK(cudaStreamSynchronize(stream));
}
// Scatters raw feature values into the per-slot destination tensors using
// the prefix offsets produced by FillSlotValueOffsetKernel.  One thread per
// (slot, instance) pair; each thread copies that instance's values for its
// slot from the packed uint64/float source arrays into dest[slot].
__global__ void CopyForTensorKernel(const int used_slot_num,
                                    const int ins_num,
                                    void **dest,
                                    const size_t *slot_value_offsets,
                                    const uint64_t *uint64_feas,
                                    const int *uint64_offsets,
                                    const int *uint64_ins_lens,
                                    const int uint64_slot_size,
                                    const float *float_feas,
                                    const int *float_offsets,
                                    const int *float_ins_lens,
                                    const int float_slot_size,
                                    const UsedSlotGpuType *used_slots) {
  int col_num = ins_num + 1;
  int uint64_cols = uint64_slot_size + 1;
  int float_cols = float_slot_size + 1;
  CUDA_KERNEL_LOOP(i, ins_num * used_slot_num) {
    // Decompose the flat index into (slot, instance).
    int slot_idx = i / ins_num;
    int ins_idx = i % ins_num;
    // Where this (slot, instance)'s values start in the destination tensor.
    uint32_t value_offset = slot_value_offsets[slot_idx * col_num + ins_idx];
    auto &info = used_slots[slot_idx];
    if (info.is_uint64_value) {
      uint64_t *up = reinterpret_cast<uint64_t *>(dest[slot_idx]);
      int index = info.slot_value_idx + uint64_cols * ins_idx;
      int old_off = uint64_offsets[index];
      int num = uint64_offsets[index + 1] - old_off;
      PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
      // Source position = per-instance base + within-instance offset.
      int uint64_value_offset = uint64_ins_lens[ins_idx];
      for (int k = 0; k < num; ++k) {
        up[k + value_offset] = uint64_feas[k + old_off + uint64_value_offset];
      }
    } else {
      float *fp = reinterpret_cast<float *>(dest[slot_idx]);
      int index = info.slot_value_idx + float_cols * ins_idx;
      int old_off = float_offsets[index];
      int num = float_offsets[index + 1] - old_off;
      PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
      int float_value_offset = float_ins_lens[ins_idx];
      for (int k = 0; k < num; ++k) {
        fp[k + value_offset] = float_feas[k + old_off + float_value_offset];
      }
    }
  }
}
// Host-side launcher for CopyForTensorKernel: copies the batched uint64 and
// float feature values into the per-slot destination tensors (`dest`), one
// thread per (slot, instance) pair, then blocks until the copy finishes on
// `stream`.
void SlotRecordInMemoryDataFeed::CopyForTensor(
    const int ins_num,
    const int used_slot_num,
    void **dest,
    const size_t *slot_value_offsets,
    const uint64_t *uint64_feas,
    const int *uint64_offsets,
    const int *uint64_ins_lens,
    const int uint64_slot_size,
    const float *float_feas,
    const int *float_offsets,
    const int *float_ins_lens,
    const int float_slot_size,
    const UsedSlotGpuType *used_slots,
    cudaStream_t stream) {
  CopyForTensorKernel<<<GET_BLOCKS(used_slot_num * ins_num),
                        CUDA_NUM_THREADS,
                        0,
                        stream>>>(used_slot_num,
                                  ins_num,
                                  dest,
                                  slot_value_offsets,
                                  uint64_feas,
                                  uint64_offsets,
                                  uint64_ins_lens,
                                  uint64_slot_size,
                                  float_feas,
                                  float_offsets,
                                  float_ins_lens,
                                  float_slot_size,
                                  used_slots);
  // Block so callers may safely read/free the destination tensors.
  cudaStreamSynchronize(stream);
}
// Fills an int64 tensor with the constant 1 (used below for the show/clk
// columns of a batch).
__global__ void GraphFillCVMKernel(int64_t *tensor, int len) {
  CUDA_KERNEL_LOOP(idx, len) { tensor[idx] = 1; }
}
// Duplicates every source key into two consecutive destination slots:
// dist_tensor[2i] = dist_tensor[2i+1] = src_tensor[i]. `dist_tensor` must
// therefore hold 2 * len elements.
__global__ void CopyDuplicateKeys(int64_t *dist_tensor,
                                  uint64_t *src_tensor,
                                  int len) {
  CUDA_KERNEL_LOOP(idx, len) {
    dist_tensor[idx * 2] = src_tensor[idx];
    dist_tensor[idx * 2 + 1] = src_tensor[idx];
  }
}
#if defined(PADDLE_WITH_PSCORE) && defined(PADDLE_WITH_GPU_GRAPH)
// Advances the walk-buffer state machine (step, then central word, then a
// whole new batch) and returns the number of available instances, or 0 when
// the buffer is exhausted.
int GraphDataGenerator::AcquireInstance(BufState *state) {
  // Short-circuit: stop at the first advancement strategy that succeeds.
  const bool advanced = state->GetNextStep() || state->GetNextCentrolWord() ||
                        state->GetNextBatch();
  if (!advanced) {
    return 0;
  }
  DEBUG_STATE(state);
  return state->len;
}
// Builds (src, dst) training pairs from the random-walk matrix `walk`.
// Each thread inspects one row's central word and the word `step` positions
// away; pairs containing a zero node are dropped, and pairs whose
// (src, dst) node types match an entry of `excluded_train_pair` are
// filtered out. Surviving pairs are compacted in shared memory, then the
// whole block reserves a contiguous range in `id_tensor` with a single
// atomicAdd on `fill_ins_num` and flushes its pairs there.
// NOTE(review): `walk_ntype` is only read when excluded_train_pair_len > 0;
// callers appear to pass NULL otherwise — confirm.
__global__ void GraphFillIdKernel(uint64_t *id_tensor,
                                  int *fill_ins_num,
                                  uint64_t *walk,
                                  uint8_t *walk_ntype,
                                  int *row,
                                  int central_word,
                                  int step,
                                  int len,
                                  int col_num,
                                  uint8_t *excluded_train_pair,
                                  int excluded_train_pair_len) {
  __shared__ uint64_t local_key[CUDA_NUM_THREADS * 2];
  __shared__ int local_num;
  __shared__ int global_num;
  bool need_filter = false;
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (threadIdx.x == 0) {
    local_num = 0;
  }
  __syncthreads();
  // int dst = idx * 2;
  // id_tensor[dst] = walk[src];
  // id_tensor[dst + 1] = walk[src + step];
  if (idx < len) {
    int src = row[idx] * col_num + central_word;
    // Zero is used as an invalid/padding node id here.
    if (walk[src] != 0 && walk[src + step] != 0) {
      // excluded_train_pair stores (src_type, dst_type) couples flat.
      for (int i = 0; i < excluded_train_pair_len; i += 2) {
        if (walk_ntype[src] == excluded_train_pair[i] &&
            walk_ntype[src + step] == excluded_train_pair[i + 1]) {
          // filter this pair
          need_filter = true;
          break;
        }
      }
      if (!need_filter) {
        // Compact surviving pairs into block-shared storage.
        size_t dst = atomicAdd(&local_num, 1);
        local_key[dst * 2] = walk[src];
        local_key[dst * 2 + 1] = walk[src + step];
      }
    }
  }
  __syncthreads();
  // One global atomic per block reserves the output range.
  if (threadIdx.x == 0) {
    global_num = atomicAdd(fill_ins_num, local_num);
  }
  __syncthreads();
  // The first `local_num` threads flush one pair each.
  if (threadIdx.x < local_num) {
    id_tensor[global_num * 2 + 2 * threadIdx.x] = local_key[2 * threadIdx.x];
    id_tensor[global_num * 2 + 2 * threadIdx.x + 1] =
        local_key[2 * threadIdx.x + 1];
  }
}
// Scatters the gathered feature buffer into per-slot tensors.
// `id_tensor` holds one device pointer per actual slot, stored as uint64_t
// (see the reinterpret_cast below). Each thread moves one feature value:
// idx enumerates (fea_idx, ins_idx) pairs with fea_idx-major layout.
// `fea_offset_map`/`actual_slot_id_map` translate a flat feature index into
// (slot, offset-within-slot).
__global__ void GraphFillSlotKernel(uint64_t *id_tensor,
                                    uint64_t *feature_buf,
                                    int len,
                                    int total_ins,
                                    int slot_num,
                                    int *slot_feature_num_map,
                                    int fea_num_per_node,
                                    int *actual_slot_id_map,
                                    int *fea_offset_map) {
  CUDA_KERNEL_LOOP(idx, len) {
    int fea_idx = idx / total_ins;
    int ins_idx = idx % total_ins;
    int actual_slot_id = actual_slot_id_map[fea_idx];
    int fea_offset = fea_offset_map[fea_idx];
    // id_tensor[actual_slot_id] is a tensor base address packed as uint64.
    reinterpret_cast<uint64_t *>(id_tensor[actual_slot_id])
        [ins_idx * slot_feature_num_map[actual_slot_id] + fea_offset] =
            feature_buf[ins_idx * fea_num_per_node + fea_idx];
  }
}
// Builds the lod (offset) array of every slot tensor: entry ins_idx of slot
// slot_idx is ins_idx * slot_feature_num_map[slot_idx], i.e. a uniform
// stride per slot. `id_tensor` holds one device pointer per slot, stored
// as uint64_t.
__global__ void GraphFillSlotLodKernelOpt(uint64_t *id_tensor,
                                          int len,
                                          int total_ins,
                                          int *slot_feature_num_map) {
  CUDA_KERNEL_LOOP(idx, len) {
    int slot_idx = idx / total_ins;
    int ins_idx = idx % total_ins;
    (reinterpret_cast<uint64_t *>(id_tensor[slot_idx]))[ins_idx] =
        ins_idx * slot_feature_num_map[slot_idx];
  }
}
// Writes the identity lod [0, 1, 2, ..., len-1] into id_tensor
// (one feature value per instance).
__global__ void GraphFillSlotLodKernel(int64_t *id_tensor, int len) {
  CUDA_KERNEL_LOOP(idx, len) { id_tensor[idx] = idx; }
}
// fill sage neighbor results
// Compacts the fixed-stride sample matrix `vals` (stride = sample_size per
// source node) into the dense array `actual_vals`, using the exclusive
// prefix sum of the actual sample sizes as destination offsets, and records
// the destination node id (i % mod) for every copied neighbor.
__global__ void FillActualNeighbors(int64_t *vals,
                                    int64_t *actual_vals,
                                    int64_t *actual_vals_dst,
                                    int *actual_sample_size,
                                    int *cumsum_actual_sample_size,
                                    int sample_size,
                                    int len,
                                    int mod) {
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= len) {
    return;
  }
  const int dense_base = cumsum_actual_sample_size[tid];
  const int padded_base = sample_size * tid;
  const int dst_id = tid % mod;
  const int num = actual_sample_size[tid];
  for (int k = 0; k < num; ++k) {
    actual_vals[dense_base + k] = vals[padded_base + k];
    actual_vals_dst[dense_base + k] = dst_id;
  }
}
// Fills the id / show / clk feed tensors for one (non-sage) batch.
// Training: copies the newest `total_instance` ids from the tail of the
// instance pair buffer. Inference: expands device keys (one per node) into
// (src, dst) duplicated pairs via CopyDuplicateKeys, so total_instance must
// be even. show/clk are constant-1 tensors. Always returns 0.
int GraphDataGenerator::FillIdShowClkTensor(int total_instance,
                                            bool gpu_graph_training,
                                            size_t cursor) {
  id_tensor_ptr_ =
      feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
  show_tensor_ptr_ =
      feed_vec_[1]->mutable_data<int64_t>({total_instance}, this->place_);
  clk_tensor_ptr_ =
      feed_vec_[2]->mutable_data<int64_t>({total_instance}, this->place_);
  if (gpu_graph_training) {
    uint64_t *ins_cursor, *ins_buf;
    ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
    // The last total_instance ids (total_instance / 2 pairs) of the buffer.
    ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
    cudaMemcpyAsync(id_tensor_ptr_,
                    ins_cursor,
                    sizeof(uint64_t) * total_instance,
                    cudaMemcpyDeviceToDevice,
                    train_stream_);
  } else {
    uint64_t *d_type_keys =
        reinterpret_cast<uint64_t *>(d_device_keys_[cursor]->ptr());
    // Consume the next total_instance / 2 keys of this node type.
    d_type_keys += infer_node_start_;
    infer_node_start_ += total_instance / 2;
    CopyDuplicateKeys<<<GET_BLOCKS(total_instance / 2),
                        CUDA_NUM_THREADS,
                        0,
                        train_stream_>>>(
        id_tensor_ptr_, d_type_keys, total_instance / 2);
  }
  GraphFillCVMKernel<<<GET_BLOCKS(total_instance),
                       CUDA_NUM_THREADS,
                       0,
                       train_stream_>>>(show_tensor_ptr_, total_instance);
  GraphFillCVMKernel<<<GET_BLOCKS(total_instance),
                       CUDA_NUM_THREADS,
                       0,
                       train_stream_>>>(clk_tensor_ptr_, total_instance);
  return 0;
}
// Fills one sage-mode batch (`index`): unique node ids, show/clk, the
// inverse index tensor, the optional degree tensor, and for every sample
// layer the five graph tensors (num_nodes, next_num_nodes, edge src/dst,
// edge split counts).
// feed_vec_ layout assumed here: [0..2] id/show/clk, then 2 tensors per
// slot, then 5 tensors per sample layer, then index (+ degree).
// All copies are async on train_stream_; the caller synchronizes.
// Always returns 0.
int GraphDataGenerator::FillGraphIdShowClkTensor(int uniq_instance,
                                                 int total_instance,
                                                 int index) {
  id_tensor_ptr_ =
      feed_vec_[0]->mutable_data<int64_t>({uniq_instance, 1}, this->place_);
  show_tensor_ptr_ =
      feed_vec_[1]->mutable_data<int64_t>({uniq_instance}, this->place_);
  clk_tensor_ptr_ =
      feed_vec_[2]->mutable_data<int64_t>({uniq_instance}, this->place_);
  int index_offset = 3 + slot_num_ * 2 + 5 * samples_.size();
  index_tensor_ptr_ = feed_vec_[index_offset]->mutable_data<int>(
      {total_instance}, this->place_);
  if (get_degree_) {
    degree_tensor_ptr_ = feed_vec_[index_offset + 1]->mutable_data<int>(
        {uniq_instance * edge_to_id_len_}, this->place_);
  }
  int len_samples = samples_.size();
  // Per-layer output pointers (VLA sized by the number of sample layers).
  int *num_nodes_tensor_ptr_[len_samples];
  int *next_num_nodes_tensor_ptr_[len_samples];
  int64_t *edges_src_tensor_ptr_[len_samples];
  int64_t *edges_dst_tensor_ptr_[len_samples];
  int *edges_split_tensor_ptr_[len_samples];
  std::vector<std::vector<int>> edges_split_num_for_graph =
      edges_split_num_vec_[index];
  std::vector<std::shared_ptr<phi::Allocation>> graph_edges =
      graph_edges_vec_[index];
  for (int i = 0; i < len_samples; i++) {
    // Five feed tensors per layer, placed after the id/show/clk and slot
    // tensors.
    int offset = 3 + 2 * slot_num_ + 5 * i;
    std::vector<int> edges_split_num = edges_split_num_for_graph[i];
    int neighbor_len = edges_split_num[edge_to_id_len_ + 2];
    num_nodes_tensor_ptr_[i] =
        feed_vec_[offset]->mutable_data<int>({1}, this->place_);
    next_num_nodes_tensor_ptr_[i] =
        feed_vec_[offset + 1]->mutable_data<int>({1}, this->place_);
    edges_src_tensor_ptr_[i] = feed_vec_[offset + 2]->mutable_data<int64_t>(
        {neighbor_len, 1}, this->place_);
    edges_dst_tensor_ptr_[i] = feed_vec_[offset + 3]->mutable_data<int64_t>(
        {neighbor_len, 1}, this->place_);
    edges_split_tensor_ptr_[i] = feed_vec_[offset + 4]->mutable_data<int>(
        {edge_to_id_len_}, this->place_);
    // [edges_split_num, next_num_nodes, num_nodes, neighbor_len]
    cudaMemcpyAsync(next_num_nodes_tensor_ptr_[i],
                    edges_split_num.data() + edge_to_id_len_,
                    sizeof(int),
                    cudaMemcpyHostToDevice,
                    train_stream_);
    cudaMemcpyAsync(num_nodes_tensor_ptr_[i],
                    edges_split_num.data() + edge_to_id_len_ + 1,
                    sizeof(int),
                    cudaMemcpyHostToDevice,
                    train_stream_);
    cudaMemcpyAsync(edges_split_tensor_ptr_[i],
                    edges_split_num.data(),
                    sizeof(int) * edge_to_id_len_,
                    cudaMemcpyHostToDevice,
                    train_stream_);
    // graph_edges stores src/dst allocations interleaved: [src0, dst0, ...].
    cudaMemcpyAsync(edges_src_tensor_ptr_[i],
                    graph_edges[i * 2]->ptr(),
                    sizeof(int64_t) * neighbor_len,
                    cudaMemcpyDeviceToDevice,
                    train_stream_);
    cudaMemcpyAsync(edges_dst_tensor_ptr_[i],
                    graph_edges[i * 2 + 1]->ptr(),
                    sizeof(int64_t) * neighbor_len,
                    cudaMemcpyDeviceToDevice,
                    train_stream_);
  }
  cudaMemcpyAsync(id_tensor_ptr_,
                  final_sage_nodes_vec_[index]->ptr(),
                  sizeof(int64_t) * uniq_instance,
                  cudaMemcpyDeviceToDevice,
                  train_stream_);
  cudaMemcpyAsync(index_tensor_ptr_,
                  inverse_vec_[index]->ptr(),
                  sizeof(int) * total_instance,
                  cudaMemcpyDeviceToDevice,
                  train_stream_);
  if (get_degree_) {
    cudaMemcpyAsync(degree_tensor_ptr_,
                    node_degree_vec_[index]->ptr(),
                    sizeof(int) * uniq_instance * edge_to_id_len_,
                    cudaMemcpyDeviceToDevice,
                    train_stream_);
  }
  GraphFillCVMKernel<<<GET_BLOCKS(uniq_instance),
                       CUDA_NUM_THREADS,
                       0,
                       train_stream_>>>(show_tensor_ptr_, uniq_instance);
  GraphFillCVMKernel<<<GET_BLOCKS(uniq_instance),
                       CUDA_NUM_THREADS,
                       0,
                       train_stream_>>>(clk_tensor_ptr_, uniq_instance);
  return 0;
}
// Fills the slot feature tensors for the current batch.
// The node-id source depends on mode: training reads the tail of the
// instance pair buffer, inference reuses the freshly allocated id tensor;
// sage mode overrides both with the deduplicated node list.
int GraphDataGenerator::FillGraphSlotFeature(
    int total_instance,
    bool gpu_graph_training,
    std::shared_ptr<phi::Allocation> final_sage_nodes) {
  uint64_t *node_ids = nullptr;
  if (gpu_graph_training) {
    uint64_t *ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
    // Last total_instance ids of the pair buffer belong to this batch.
    node_ids = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
  } else {
    id_tensor_ptr_ =
        feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
    node_ids = reinterpret_cast<uint64_t *>(id_tensor_ptr_);
  }
  if (sage_mode_) {
    node_ids = reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
  }
  return FillSlotFeature(node_ids, total_instance);
}
// Generates (src, dst) training pairs from the current walk buffer state
// and appends them to the instance pair buffer. Returns the updated total
// pair count (ins_buf_pair_len_). Synchronous on `stream` because the pair
// count is needed on the host before returning.
int GraphDataGenerator::MakeInsPair(cudaStream_t stream) {
  uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
  uint8_t *walk_ntype = NULL;
  uint8_t *excluded_train_pair = NULL;
  // Node-type info is only materialized when pair exclusion is configured.
  if (excluded_train_pair_len_ > 0) {
    walk_ntype = reinterpret_cast<uint8_t *>(d_walk_ntype_->ptr());
    excluded_train_pair =
        reinterpret_cast<uint8_t *>(d_excluded_train_pair_->ptr());
  }
  uint64_t *ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
  int *random_row = reinterpret_cast<int *>(d_random_row_->ptr());
  int *d_pair_num = reinterpret_cast<int *>(d_pair_num_->ptr());
  // Zero the device-side pair counter before the kernel accumulates into it.
  cudaMemsetAsync(d_pair_num, 0, sizeof(int), stream);
  int len = buf_state_.len;
  // make pair
  GraphFillIdKernel<<<GET_BLOCKS(len), CUDA_NUM_THREADS, 0, stream>>>(
      ins_buf + ins_buf_pair_len_ * 2,
      d_pair_num,
      walk,
      walk_ntype,
      random_row + buf_state_.cursor,
      buf_state_.central_word,
      window_step_[buf_state_.step],
      len,
      walk_len_,
      excluded_train_pair,
      excluded_train_pair_len_);
  int h_pair_num;
  cudaMemcpyAsync(
      &h_pair_num, d_pair_num, sizeof(int), cudaMemcpyDeviceToHost, stream);
  // Wait so h_pair_num is valid before updating the host-side length.
  cudaStreamSynchronize(stream);
  ins_buf_pair_len_ += h_pair_num;
  if (debug_mode_) {
    uint64_t h_ins_buf[ins_buf_pair_len_ * 2];  // NOLINT
    cudaMemcpy(h_ins_buf,
               ins_buf,
               2 * ins_buf_pair_len_ * sizeof(uint64_t),
               cudaMemcpyDeviceToHost);
    VLOG(2) << "h_pair_num = " << h_pair_num
            << ", ins_buf_pair_len = " << ins_buf_pair_len_;
    for (int xx = 0; xx < ins_buf_pair_len_; xx++) {
      VLOG(2) << "h_ins_buf: " << h_ins_buf[xx * 2] << ", "
              << h_ins_buf[xx * 2 + 1];
    }
  }
  return ins_buf_pair_len_;
}
// Tops up the instance pair buffer until at least one batch is available.
// Returns batch_size_ when the buffer is already full enough, -1 when the
// walk buffer is exhausted, otherwise the new pair count from MakeInsPair.
int GraphDataGenerator::FillInsBuf(cudaStream_t stream) {
  if (ins_buf_pair_len_ >= batch_size_) {
    return batch_size_;
  }
  const int total_instance = AcquireInstance(&buf_state_);
  VLOG(2) << "total_ins: " << total_instance;
  buf_state_.Debug();
  return (total_instance == 0) ? -1 : MakeInsPair(stream);
}
// Produces one batch of feed tensors. Returns 1 when a batch was produced,
// 0 when the data source is exhausted.
// Four paths: {train, infer} x {plain, sage}. Plain inference walks the
// device key list; plain training drains the instance pair buffer; sage
// mode replays precomputed per-batch vectors indexed by sage_batch_count_.
int GraphDataGenerator::GenerateBatch() {
  int total_instance = 0;
  platform::CUDADeviceGuard guard(gpuid_);
  int res = 0;
  if (!gpu_graph_training_) {
    // infer
    if (!sage_mode_) {
      // Remaining keys may be fewer than a full batch at the tail.
      total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
                           ? batch_size_
                           : infer_node_end_ - infer_node_start_;
      VLOG(1) << "in graph_data generator:batch_size = " << batch_size_
              << " instance = " << total_instance;
      // Each key is duplicated into a (src, dst) pair.
      total_instance *= 2;
      if (total_instance == 0) {
        return 0;
      }
      FillIdShowClkTensor(total_instance, gpu_graph_training_, cursor_);
    } else {
      if (sage_batch_count_ == sage_batch_num_) {
        return 0;
      }
      FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
                               total_instance_vec_[sage_batch_count_],
                               sage_batch_count_);
    }
  } else {
    // train
    if (!sage_mode_) {
      // Refill until a full batch of pairs is buffered or the walks run out.
      while (ins_buf_pair_len_ < batch_size_) {
        res = FillInsBuf(train_stream_);
        if (res == -1) {
          if (ins_buf_pair_len_ == 0) {
            return 0;
          } else {
            break;
          }
        }
      }
      total_instance =
          ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
      total_instance *= 2;
      VLOG(2) << "total_instance: " << total_instance
              << ", ins_buf_pair_len = " << ins_buf_pair_len_;
      FillIdShowClkTensor(total_instance, gpu_graph_training_);
    } else {
      if (sage_batch_count_ == sage_batch_num_) {
        return 0;
      }
      FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
                               total_instance_vec_[sage_batch_count_],
                               sage_batch_count_);
    }
  }
  if (slot_num_ > 0) {
    if (!sage_mode_) {
      FillGraphSlotFeature(total_instance, gpu_graph_training_);
    } else {
      FillGraphSlotFeature(uniq_instance_vec_[sage_batch_count_],
                           gpu_graph_training_,
                           final_sage_nodes_vec_[sage_batch_count_]);
    }
  }
  // One lod spanning the whole batch, shared by the id and slot tensors.
  offset_.clear();
  offset_.push_back(0);
  if (!sage_mode_) {
    offset_.push_back(total_instance);
  } else {
    offset_.push_back(uniq_instance_vec_[sage_batch_count_]);
    sage_batch_count_ += 1;
  }
  LoD lod{offset_};
  feed_vec_[0]->set_lod(lod);
  if (slot_num_ > 0) {
    for (int i = 0; i < slot_num_; ++i) {
      feed_vec_[3 + 2 * i]->set_lod(lod);
    }
  }
  cudaStreamSynchronize(train_stream_);
  if (!gpu_graph_training_) return 1;
  if (!sage_mode_) {
    // Consume the pairs used by this batch from the buffer.
    ins_buf_pair_len_ -= total_instance / 2;
  }
  return 1;
}
// For each source index idx, compacts its sampled neighbors (stored with
// fixed stride cur_degree in `neighbors`) into `sample_keys` at
// prefix_sum[idx], and extends the sample-index -> walk-row mapping for the
// next step of the walk.
__global__ void GraphFillSampleKeysKernel(uint64_t *neighbors,
                                          uint64_t *sample_keys,
                                          int *prefix_sum,
                                          int *sampleidx2row,
                                          int *tmp_sampleidx2row,
                                          int *actual_sample_size,
                                          int cur_degree,
                                          int len) {
  CUDA_KERNEL_LOOP(idx, len) {
    for (int k = 0; k < actual_sample_size[idx]; k++) {
      size_t offset = prefix_sum[idx] + k;
      sample_keys[offset] = neighbors[idx * cur_degree + k];
      tmp_sampleidx2row[offset] = sampleidx2row[idx] + k;
    }
  }
}
// Writes the sampled neighbors of step `step` into column `step` of the
// walk matrix `walk` (row length col_size), and tags the node type of each
// written entry with edge_dst_id when walk_ntype is non-NULL.
// NOTE(review): parameters `id_cnt` and `cur_degree`'s companion `len_per_row`
// (passed by FillOneStep) — `id_cnt` is unused in this body; kept for ABI
// compatibility with callers.
__global__ void GraphDoWalkKernel(uint64_t *neighbors,
                                  uint64_t *walk,
                                  uint8_t *walk_ntype,
                                  int *d_prefix_sum,
                                  int *actual_sample_size,
                                  int cur_degree,
                                  int step,
                                  int len,
                                  int *id_cnt,
                                  int *sampleidx2row,
                                  int col_size,
                                  uint8_t edge_dst_id) {
  CUDA_KERNEL_LOOP(i, len) {
    for (int k = 0; k < actual_sample_size[i]; k++) {
      // int idx = sampleidx2row[i];
      // Map the k-th sample of source i back to its walk row.
      size_t row = sampleidx2row[k + d_prefix_sum[i]];
      // size_t row = idx * cur_degree + k;
      size_t col = step;
      size_t offset = (row * col_size + col);
      walk[offset] = neighbors[i * cur_degree + k];
      if (walk_ntype != NULL) {
        walk_ntype[offset] = edge_dst_id;
      }
    }
  }
}
// Fill keys to the first column of walk
// For the first walk step: writes the start key into column 0 and its
// sampled neighbor into column 1 of each walk row, initializes the
// sample-index -> row mapping to the identity, and records the neighbor
// as the key for the next sampling round. Node types for both columns are
// tagged when walk_ntype is non-NULL.
__global__ void GraphFillFirstStepKernel(int *prefix_sum,
                                         int *sampleidx2row,
                                         uint64_t *walk,
                                         uint8_t *walk_ntype,
                                         uint64_t *keys,
                                         uint8_t edge_src_id,
                                         uint8_t edge_dst_id,
                                         int len,
                                         int walk_degree,
                                         int col_size,
                                         int *actual_sample_size,
                                         uint64_t *neighbors,
                                         uint64_t *sample_keys) {
  CUDA_KERNEL_LOOP(idx, len) {
    for (int k = 0; k < actual_sample_size[idx]; k++) {
      size_t row = prefix_sum[idx] + k;
      sample_keys[row] = neighbors[idx * walk_degree + k];
      sampleidx2row[row] = row;
      size_t offset = col_size * row;
      walk[offset] = keys[idx];
      walk[offset + 1] = neighbors[idx * walk_degree + k];
      if (walk_ntype != NULL) {
        walk_ntype[offset] = edge_src_id;
        walk_ntype[offset + 1] = edge_dst_id;
      }
    }
  }
}
// For every instance (node) i: counts how many of its features fall into
// each slot (each_ins_slot_num) and builds the per-instance exclusive
// prefix sum over slots (each_ins_slot_num_inner_prefix).
// Indexed via threadIdx.y / blockDim.y because the callers in this file
// launch it with dim3 block(1, 256).
// NOTE(review): each_ins_slot_num is assumed pre-zeroed by the caller
// (see the cudaMemsetAsync before the launch in FillSlotFeature).
__global__ void get_each_ins_info(uint8_t *slot_list,
                                  uint32_t *slot_size_list,
                                  uint32_t *slot_size_prefix,
                                  uint32_t *each_ins_slot_num,
                                  uint32_t *each_ins_slot_num_inner_prefix,
                                  size_t key_num,
                                  int slot_num) {
  const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
  if (i < key_num) {
    uint32_t slot_index = slot_size_prefix[i];
    size_t each_ins_slot_index = i * slot_num;
    // Tally this instance's features per slot id.
    for (int j = 0; j < slot_size_list[i]; j++) {
      each_ins_slot_num[each_ins_slot_index + slot_list[slot_index + j]] += 1;
    }
    // Exclusive prefix sum across slots for this instance.
    each_ins_slot_num_inner_prefix[each_ins_slot_index] = 0;
    for (int j = 1; j < slot_num; j++) {
      each_ins_slot_num_inner_prefix[each_ins_slot_index + j] =
          each_ins_slot_num[each_ins_slot_index + j - 1] +
          each_ins_slot_num_inner_prefix[each_ins_slot_index + j - 1];
    }
  }
}
// Transposes the instance-major (instance, slot) count matrix into
// slot-major arrays: d_ins_slot_num_vector_ptr[slot][instance] = count.
// Also widens the counts from uint32 to uint64 for the later prefix scan.
__global__ void fill_slot_num(uint32_t *d_each_ins_slot_num_ptr,
                              uint64_t **d_ins_slot_num_vector_ptr,
                              size_t key_num,
                              int slot_num) {
  const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
  if (i < key_num) {
    size_t d_each_index = i * slot_num;
    for (int j = 0; j < slot_num; j++) {
      d_ins_slot_num_vector_ptr[j][i] =
          d_each_ins_slot_num_ptr[d_each_index + j];
    }
  }
}
// Copies the feature values belonging to one slot (`slot`) out of the
// mixed-slot feature stream into that slot's tensor. For node i, the
// source offset is the node's feature base (feature_size_prefixsum) plus
// the within-node offset of this slot (each_ins_slot_num_inner_prefix);
// the destination offset comes from the slot's lod tensor.
__global__ void fill_slot_tensor(uint64_t *feature_list,
                                 uint32_t *feature_size_prefixsum,
                                 uint32_t *each_ins_slot_num_inner_prefix,
                                 uint64_t *ins_slot_num,
                                 int64_t *slot_lod_tensor,
                                 int64_t *slot_tensor,
                                 int slot,
                                 int slot_num,
                                 size_t node_num) {
  const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
  if (i < node_num) {
    size_t dst_index = slot_lod_tensor[i];
    size_t src_index = feature_size_prefixsum[i] +
                       each_ins_slot_num_inner_prefix[slot_num * i + slot];
    for (uint64_t j = 0; j < ins_slot_num[i]; j++) {
      slot_tensor[dst_index + j] = feature_list[src_index + j];
    }
  }
}
// Counts the number of distinct runs in `d_in` by comparing each element
// with its successor (plus one for the final element), accumulating into a
// block-shared counter first so each block issues only one global atomic.
// NOTE(review): counting run boundaries equals counting unique values only
// when d_in is sorted (equal keys adjacent) — confirm callers sort first.
__global__ void GetUniqueFeaNum(uint64_t *d_in,
                                uint64_t *unique_num,
                                size_t len) {
  const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ uint64_t local_num;
  if (threadIdx.x == 0) {
    local_num = 0;
  }
  __syncthreads();
  if (i < len - 1) {
    if (d_in[i] != d_in[i + 1]) {
      atomicAdd(&local_num, 1);
    }
  }
  // The last element always terminates a run.
  if (i == len - 1) {
    atomicAdd(&local_num, 1);
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    atomicAdd(unique_num, local_num);
  }
}
// Writes one representative (the last element of each run) of every
// distinct run of `d_in` into `d_out`, using shared-memory compaction and
// a single block-level atomic on `unique_num` to reserve output space.
// Output order follows block completion order, not input order.
// NOTE(review): like GetUniqueFeaNum, this deduplicates correctly only for
// sorted input — confirm callers sort first.
__global__ void UniqueFeature(uint64_t *d_in,
                              uint64_t *d_out,
                              uint64_t *unique_num,
                              size_t len) {
  const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ uint64_t local_key[CUDA_NUM_THREADS];
  __shared__ uint64_t local_num;
  __shared__ uint64_t global_num;
  if (threadIdx.x == 0) {
    local_num = 0;
  }
  __syncthreads();
  if (i < len - 1) {
    if (d_in[i] != d_in[i + 1]) {
      size_t dst = atomicAdd(&local_num, 1);
      local_key[dst] = d_in[i];
    }
  }
  // The final element always ends a run.
  if (i == len - 1) {
    size_t dst = atomicAdd(&local_num, 1);
    local_key[dst] = d_in[i];
  }
  __syncthreads();
  // Reserve a contiguous output range for this block's unique keys.
  if (threadIdx.x == 0) {
    global_num = atomicAdd(unique_num, local_num);
  }
  __syncthreads();
  if (threadIdx.x < local_num) {
    d_out[global_num + threadIdx.x] = local_key[threadIdx.x];
  }
}
// Fill sample_res to the stepth column of walk
// Writes one sampling step of the random walk into column `step` of `walk`
// (and the matching destination node type into `walk_ntype` when provided),
// maintaining the sampleidx -> walk-row mapping across steps via the
// double-buffered d_sampleidx2rows_ pair.
void GraphDataGenerator::FillOneStep(uint64_t *d_start_ids,
                                     int etype_id,
                                     uint64_t *walk,
                                     uint8_t *walk_ntype,
                                     int len,
                                     NeighborSampleResult &sample_res,
                                     int cur_degree,
                                     int step,
                                     int *len_per_row) {
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  // edge_to_node_map_ packs (src_node_type << 32) | dst_node_type.
  uint64_t node_id = gpu_graph_ptr->edge_to_node_map_[etype_id];
  uint8_t edge_src_id = node_id >> 32;
  uint8_t edge_dst_id = node_id;
  size_t temp_storage_bytes = 0;
  int *d_actual_sample_size = sample_res.actual_sample_size;
  uint64_t *d_neighbors = sample_res.val;
  int *d_prefix_sum = reinterpret_cast<int *>(d_prefix_sum_->ptr());
  uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
  int *d_sampleidx2row =
      reinterpret_cast<int *>(d_sampleidx2rows_[cur_sampleidx2row_]->ptr());
  int *d_tmp_sampleidx2row =
      reinterpret_cast<int *>(d_sampleidx2rows_[1 - cur_sampleidx2row_]->ptr());
  // Inclusive scan of actual sample sizes written to d_prefix_sum + 1, so
  // the kernels can read d_prefix_sum[i] as an exclusive prefix.
  // NOTE(review): d_prefix_sum[0] is assumed to be zeroed elsewhere — confirm.
  CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
                                           temp_storage_bytes,
                                           d_actual_sample_size,
                                           d_prefix_sum + 1,
                                           len,
                                           sample_stream_));
  auto d_temp_storage = memory::Alloc(
      place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                           temp_storage_bytes,
                                           d_actual_sample_size,
                                           d_prefix_sum + 1,
                                           len,
                                           sample_stream_));
  cudaStreamSynchronize(sample_stream_);
  if (step == 1) {
    // First step: fill columns 0 and 1 and seed the identity row mapping.
    GraphFillFirstStepKernel<<<GET_BLOCKS(len),
                               CUDA_NUM_THREADS,
                               0,
                               sample_stream_>>>(d_prefix_sum,
                                                 d_tmp_sampleidx2row,
                                                 walk,
                                                 walk_ntype,
                                                 d_start_ids,
                                                 edge_src_id,
                                                 edge_dst_id,
                                                 len,
                                                 walk_degree_,
                                                 walk_len_,
                                                 d_actual_sample_size,
                                                 d_neighbors,
                                                 d_sample_keys);
  } else {
    // Later steps: first extend the row mapping and collect the next round
    // of sample keys, then write this step's column of the walk.
    GraphFillSampleKeysKernel<<<GET_BLOCKS(len),
                                CUDA_NUM_THREADS,
                                0,
                                sample_stream_>>>(d_neighbors,
                                                  d_sample_keys,
                                                  d_prefix_sum,
                                                  d_sampleidx2row,
                                                  d_tmp_sampleidx2row,
                                                  d_actual_sample_size,
                                                  cur_degree,
                                                  len);
    GraphDoWalkKernel<<<GET_BLOCKS(len), CUDA_NUM_THREADS, 0, sample_stream_>>>(
        d_neighbors,
        walk,
        walk_ntype,
        d_prefix_sum,
        d_actual_sample_size,
        cur_degree,
        step,
        len,
        len_per_row,
        d_tmp_sampleidx2row,
        walk_len_,
        edge_dst_id);
  }
  if (debug_mode_) {
    size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
    int *h_prefix_sum = new int[len + 1];
    int *h_offset2idx = new int[once_max_sample_keynum];
    cudaMemcpy(h_offset2idx,
               d_tmp_sampleidx2row,
               once_max_sample_keynum * sizeof(int),
               cudaMemcpyDeviceToHost);
    cudaMemcpy(h_prefix_sum,
               d_prefix_sum,
               (len + 1) * sizeof(int),
               cudaMemcpyDeviceToHost);
    for (int xx = 0; xx < once_max_sample_keynum; xx++) {
      VLOG(2) << "h_offset2idx[" << xx << "]: " << h_offset2idx[xx];
    }
    for (int xx = 0; xx < len + 1; xx++) {
      VLOG(2) << "h_prefix_sum[" << xx << "]: " << h_prefix_sum[xx];
    }
    delete[] h_prefix_sum;
    delete[] h_offset2idx;
  }
  cudaStreamSynchronize(sample_stream_);
  // Swap the double buffer so the next step reads the mapping written here.
  cur_sampleidx2row_ = 1 - cur_sampleidx2row_;
}
// Looks up the slot features of `key_num` node ids (d_walk) from the graph
// wrapper and materializes, for every slot i, the feed tensors
// feed_vec_[3 + 2*i] (values) and feed_vec_[3 + 2*i + 1] (lod).
// When no features exist at all (fea_num == 0), each slot gets a single
// zero value and a flat lod. Returns 0.
int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk, size_t key_num) {
  platform::CUDADeviceGuard guard(gpuid_);
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  std::shared_ptr<phi::Allocation> d_feature_list;
  std::shared_ptr<phi::Allocation> d_slot_list;
  // In sage mode the size buffers are (re)grown lazily per batch; in
  // non-sage mode they are assumed to be pre-sized elsewhere.
  if (sage_mode_) {
    size_t temp_storage_bytes = (key_num + 1) * sizeof(uint32_t);
    if (d_feature_size_list_buf_ == NULL ||
        d_feature_size_list_buf_->size() < temp_storage_bytes) {
      d_feature_size_list_buf_ =
          memory::AllocShared(this->place_, temp_storage_bytes);
    }
    if (d_feature_size_prefixsum_buf_ == NULL ||
        d_feature_size_prefixsum_buf_->size() < temp_storage_bytes) {
      d_feature_size_prefixsum_buf_ =
          memory::AllocShared(this->place_, temp_storage_bytes);
    }
  }
  uint32_t *d_feature_size_list_ptr =
      reinterpret_cast<uint32_t *>(d_feature_size_list_buf_->ptr());
  uint32_t *d_feature_size_prefixsum_ptr =
      reinterpret_cast<uint32_t *>(d_feature_size_prefixsum_buf_->ptr());
  // Fetch all features of all nodes; outputs per-node sizes, their prefix
  // sums, the flat feature list and the matching slot-id list.
  int fea_num =
      gpu_graph_ptr->get_feature_info_of_nodes(gpuid_,
                                               d_walk,
                                               key_num,
                                               d_feature_size_list_ptr,
                                               d_feature_size_prefixsum_ptr,
                                               d_feature_list,
                                               d_slot_list);
  int64_t *slot_tensor_ptr_[slot_num_];
  int64_t *slot_lod_tensor_ptr_[slot_num_];
  // No features at all: emit a single zero value per slot and a flat lod.
  if (fea_num == 0) {
    int64_t default_lod = 1;
    for (int i = 0; i < slot_num_; ++i) {
      slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
          {(long)key_num + 1}, this->place_);  // NOLINT
      slot_tensor_ptr_[i] =
          feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
      CUDA_CHECK(cudaMemsetAsync(
          slot_tensor_ptr_[i], 0, sizeof(int64_t), train_stream_));
      CUDA_CHECK(cudaMemsetAsync(slot_lod_tensor_ptr_[i],
                                 0,
                                 sizeof(int64_t) * key_num,
                                 train_stream_));
      // Final lod entry (total length) is 1.
      CUDA_CHECK(cudaMemcpyAsync(
          reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
          &default_lod,
          sizeof(int64_t),
          cudaMemcpyHostToDevice,
          train_stream_));
    }
    CUDA_CHECK(cudaStreamSynchronize(train_stream_));
    return 0;
  }
  uint64_t *d_feature_list_ptr =
      reinterpret_cast<uint64_t *>(d_feature_list->ptr());
  uint8_t *d_slot_list_ptr = reinterpret_cast<uint8_t *>(d_slot_list->ptr());
  // Per-(instance, slot) feature counts and per-instance inner prefix sums.
  std::shared_ptr<phi::Allocation> d_each_ins_slot_num_inner_prefix =
      memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
  std::shared_ptr<phi::Allocation> d_each_ins_slot_num =
      memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
  uint32_t *d_each_ins_slot_num_ptr =
      reinterpret_cast<uint32_t *>(d_each_ins_slot_num->ptr());
  uint32_t *d_each_ins_slot_num_inner_prefix_ptr =
      reinterpret_cast<uint32_t *>(d_each_ins_slot_num_inner_prefix->ptr());
  // get_each_ins_info accumulates, so the counts must start at zero.
  CUDA_CHECK(cudaMemsetAsync(d_each_ins_slot_num_ptr,
                             0,
                             slot_num_ * key_num * sizeof(uint32_t),
                             train_stream_));
  // One y-thread per instance; kernels below index with threadIdx.y.
  dim3 grid((key_num - 1) / 256 + 1);
  dim3 block(1, 256);
  get_each_ins_info<<<grid, block, 0, train_stream_>>>(
      d_slot_list_ptr,
      d_feature_size_list_ptr,
      d_feature_size_prefixsum_ptr,
      d_each_ins_slot_num_ptr,
      d_each_ins_slot_num_inner_prefix_ptr,
      key_num,
      slot_num_);
  // Slot-major per-instance counts, one device array per slot, plus a
  // device-side array of those arrays' pointers.
  std::vector<std::shared_ptr<phi::Allocation>> ins_slot_num(slot_num_,
                                                             nullptr);
  std::vector<uint64_t *> ins_slot_num_vecotr(slot_num_, NULL);
  std::shared_ptr<phi::Allocation> d_ins_slot_num_vector =
      memory::AllocShared(place_, (slot_num_) * sizeof(uint64_t *));
  uint64_t **d_ins_slot_num_vector_ptr =
      reinterpret_cast<uint64_t **>(d_ins_slot_num_vector->ptr());
  for (int i = 0; i < slot_num_; i++) {
    ins_slot_num[i] = memory::AllocShared(place_, key_num * sizeof(uint64_t));
    ins_slot_num_vecotr[i] =
        reinterpret_cast<uint64_t *>(ins_slot_num[i]->ptr());
  }
  CUDA_CHECK(
      cudaMemcpyAsync(reinterpret_cast<char *>(d_ins_slot_num_vector_ptr),
                      ins_slot_num_vecotr.data(),
                      sizeof(uint64_t *) * slot_num_,
                      cudaMemcpyHostToDevice,
                      train_stream_));
  fill_slot_num<<<grid, block, 0, train_stream_>>>(
      d_each_ins_slot_num_ptr, d_ins_slot_num_vector_ptr, key_num, slot_num_);
  CUDA_CHECK(cudaStreamSynchronize(train_stream_));
  for (int i = 0; i < slot_num_; ++i) {
    slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
        {(long)key_num + 1}, this->place_);  // NOLINT
  }
  // Lod of each slot = inclusive prefix sum of that slot's per-instance
  // counts, written at offset 1 (entry 0 is zeroed below).
  size_t temp_storage_bytes = 0;
  CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
                                           temp_storage_bytes,
                                           ins_slot_num_vecotr[0],
                                           slot_lod_tensor_ptr_[0] + 1,
                                           key_num,
                                           train_stream_));
  CUDA_CHECK(cudaStreamSynchronize(train_stream_));
  auto d_temp_storage = memory::Alloc(
      this->place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(train_stream_)));
  std::vector<int64_t> each_slot_fea_num(slot_num_, 0);
  for (int i = 0; i < slot_num_; ++i) {
    CUDA_CHECK(cudaMemsetAsync(
        slot_lod_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
    CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                             temp_storage_bytes,
                                             ins_slot_num_vecotr[i],
                                             slot_lod_tensor_ptr_[i] + 1,
                                             key_num,
                                             train_stream_));
    // The last lod entry is the slot's total feature count.
    CUDA_CHECK(cudaMemcpyAsync(&each_slot_fea_num[i],
                               slot_lod_tensor_ptr_[i] + key_num,
                               sizeof(uint64_t),
                               cudaMemcpyDeviceToHost,
                               train_stream_));
  }
  CUDA_CHECK(cudaStreamSynchronize(train_stream_));
  for (int i = 0; i < slot_num_; ++i) {
    slot_tensor_ptr_[i] = feed_vec_[3 + 2 * i]->mutable_data<int64_t>(
        {each_slot_fea_num[i], 1}, this->place_);
  }
  int64_t default_lod = 1;
  for (int i = 0; i < slot_num_; ++i) {
    // Scatter this slot's values out of the mixed feature stream.
    fill_slot_tensor<<<grid, block, 0, train_stream_>>>(
        d_feature_list_ptr,
        d_feature_size_prefixsum_ptr,
        d_each_ins_slot_num_inner_prefix_ptr,
        ins_slot_num_vecotr[i],
        slot_lod_tensor_ptr_[i],
        slot_tensor_ptr_[i],
        i,
        slot_num_,
        key_num);
    // trick for empty tensor
    if (each_slot_fea_num[i] == 0) {
      slot_tensor_ptr_[i] =
          feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
      CUDA_CHECK(cudaMemsetAsync(
          slot_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
      CUDA_CHECK(cudaMemcpyAsync(
          reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
          &default_lod,
          sizeof(int64_t),
          cudaMemcpyHostToDevice,
          train_stream_));
    }
  }
  CUDA_CHECK(cudaStreamSynchronize(train_stream_));
  // Debug dump: copy everything back to the host and log it.
  if (debug_mode_) {
    std::vector<uint32_t> h_feature_size_list(key_num, 0);
    std::vector<uint32_t> h_feature_size_list_prefixsum(key_num, 0);
    std::vector<uint64_t> node_list(key_num, 0);
    std::vector<uint64_t> h_feature_list(fea_num, 0);
    std::vector<uint8_t> h_slot_list(fea_num, 0);
    CUDA_CHECK(
        cudaMemcpyAsync(reinterpret_cast<char *>(h_feature_size_list.data()),
                        d_feature_size_list_ptr,
                        sizeof(uint32_t) * key_num,
                        cudaMemcpyDeviceToHost,
                        train_stream_));
    CUDA_CHECK(cudaMemcpyAsync(
        reinterpret_cast<char *>(h_feature_size_list_prefixsum.data()),
        d_feature_size_prefixsum_ptr,
        sizeof(uint32_t) * key_num,
        cudaMemcpyDeviceToHost,
        train_stream_));
    CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(node_list.data()),
                               d_walk,
                               sizeof(uint64_t) * key_num,
                               cudaMemcpyDeviceToHost,
                               train_stream_));
    CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(h_feature_list.data()),
                               d_feature_list_ptr,
                               sizeof(uint64_t) * fea_num,
                               cudaMemcpyDeviceToHost,
                               train_stream_));
    CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(h_slot_list.data()),
                               d_slot_list_ptr,
                               sizeof(uint8_t) * fea_num,
                               cudaMemcpyDeviceToHost,
                               train_stream_));
    CUDA_CHECK(cudaStreamSynchronize(train_stream_));
    for (size_t i = 0; i < key_num; i++) {
      std::stringstream ss;
      ss << "node_id: " << node_list[i]
         << " fea_num: " << h_feature_size_list[i] << " offset "
         << h_feature_size_list_prefixsum[i] << " slot: ";
      for (uint32_t j = 0; j < h_feature_size_list[i]; j++) {
        ss << int(h_slot_list[h_feature_size_list_prefixsum[i] + j]) << " : "
           << h_feature_list[h_feature_size_list_prefixsum[i] + j] << " ";
      }
      VLOG(0) << ss.str();
    }
    VLOG(0) << "all fea_num is " << fea_num << " calc fea_num is "
            << h_feature_size_list[key_num - 1] +
                   h_feature_size_list_prefixsum[key_num - 1];
    for (int i = 0; i < slot_num_; ++i) {
      std::vector<int64_t> h_slot_lod_tensor(key_num + 1, 0);
      CUDA_CHECK(
          cudaMemcpyAsync(reinterpret_cast<char *>(h_slot_lod_tensor.data()),
                          slot_lod_tensor_ptr_[i],
                          sizeof(int64_t) * (key_num + 1),
                          cudaMemcpyDeviceToHost,
                          train_stream_));
      CUDA_CHECK(cudaStreamSynchronize(train_stream_));
      std::stringstream ss_lod;
      std::stringstream ss_tensor;
      ss_lod << " slot " << i << " lod is [";
      for (size_t j = 0; j < key_num + 1; j++) {
        ss_lod << h_slot_lod_tensor[j] << ",";
      }
      ss_lod << "]";
      std::vector<int64_t> h_slot_tensor(h_slot_lod_tensor[key_num], 0);
      CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(h_slot_tensor.data()),
                                 slot_tensor_ptr_[i],
                                 sizeof(int64_t) * h_slot_lod_tensor[key_num],
                                 cudaMemcpyDeviceToHost,
                                 train_stream_));
      CUDA_CHECK(cudaStreamSynchronize(train_stream_));
      ss_tensor << " tensor is [ ";
      for (size_t j = 0; j < h_slot_lod_tensor[key_num]; j++) {
        ss_tensor << h_slot_tensor[j] << ",";
      }
      ss_tensor << "]";
      VLOG(0) << ss_lod.str() << " " << ss_tensor.str();
    }
  }
  return 0;
}
// Fetches the slot features of `key_num` nodes (d_walk) into d_feature via
// the graph wrapper; returns the wrapper's status code.
int GraphDataGenerator::FillFeatureBuf(uint64_t *d_walk,
                                       uint64_t *d_feature,
                                       size_t key_num) {
  platform::CUDADeviceGuard guard(gpuid_);
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  int *slot_feature_num_map =
      reinterpret_cast<int *>(d_slot_feature_num_map_->ptr());
  return gpu_graph_ptr->get_feature_of_nodes(gpuid_,
                                             d_walk,
                                             d_feature,
                                             key_num,
                                             slot_num_,
                                             slot_feature_num_map,
                                             fea_num_per_node_);
}
// Allocation-based overload: fetches features for the whole walk buffer
// (buf_size_ nodes) into d_feature; returns the wrapper's status code.
int GraphDataGenerator::FillFeatureBuf(
    std::shared_ptr<phi::Allocation> d_walk,
    std::shared_ptr<phi::Allocation> d_feature) {
  platform::CUDADeviceGuard guard(gpuid_);
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  uint64_t *walk_ptr = reinterpret_cast<uint64_t *>(d_walk->ptr());
  uint64_t *feature_ptr = reinterpret_cast<uint64_t *>(d_feature->ptr());
  int *slot_feature_num_map =
      reinterpret_cast<int *>(d_slot_feature_num_map_->ptr());
  return gpu_graph_ptr->get_feature_of_nodes(gpuid_,
                                             walk_ptr,
                                             feature_ptr,
                                             buf_size_,
                                             slot_num_,
                                             slot_feature_num_map,
                                             fea_num_per_node_);
}
// For deepwalk mode: try to insert into the table; returns 0 on success and
// 1 on failure.
// For sage mode: try to insert into the table; if the table runs out of
// capacity it is cleared and the insert retried, so the return value has no
// effect.
// Inserts `len` keys into the dedup table, tracking the unique-node count
// in d_uniq_node_num. Returns 1 only in non-sage training when the table
// would overflow; otherwise overflow triggers a copy-out + clear + reinsert
// and the function returns 0.
int GraphDataGenerator::InsertTable(
    const uint64_t *d_keys,
    uint64_t len,
    std::shared_ptr<phi::Allocation> d_uniq_node_num) {
  // Used under NOT WHOLE_HBM.
  uint64_t h_uniq_node_num = 0;
  uint64_t *d_uniq_node_num_ptr =
      reinterpret_cast<uint64_t *>(d_uniq_node_num->ptr());
  // Bring the current unique count to the host to test capacity.
  cudaMemcpyAsync(&h_uniq_node_num,
                  d_uniq_node_num_ptr,
                  sizeof(uint64_t),
                  cudaMemcpyDeviceToHost,
                  sample_stream_);
  cudaStreamSynchronize(sample_stream_);
  if (gpu_graph_training_) {
    VLOG(2) << "table capacity: " << train_table_cap_ << ", " << h_uniq_node_num
            << " used";
    if (h_uniq_node_num + len >= train_table_cap_) {
      if (!sage_mode_) {
        return 1;
      } else {
        // Copy unique nodes first.
        uint64_t copy_len = CopyUniqueNodes();
        copy_unique_len_ += copy_len;
        table_->clear(sample_stream_);
        cudaMemsetAsync(
            d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_);
      }
    }
  } else {
    // used only for sage_mode.
    if (h_uniq_node_num + len >= infer_table_cap_) {
      uint64_t copy_len = CopyUniqueNodes();
      copy_unique_len_ += copy_len;
      table_->clear(sample_stream_);
      cudaMemsetAsync(d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_);
    }
  }
  table_->insert(d_keys, len, d_uniq_node_num_ptr, sample_stream_);
  CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
  return 0;
}
// Samples up to `sample_size` neighbors per node for the `len` nodes in
// `uniq_nodes`, across all edge types, and compacts the ragged results into
// dense arrays. On return:
//   - edges_split_num[i] is the inclusive prefix count of sampled edges up to
//     and including edge type i,
//   - *neighbor_len is the total number of sampled neighbors,
//   - result[0] holds sampled neighbor ids (src) and result[1] the repeated
//     center-node ids (dst), aligned element-wise.
// All work runs on sample_stream_. CUDA calls are checked with CUDA_CHECK for
// consistency with the rest of the function.
std::vector<std::shared_ptr<phi::Allocation>>
GraphDataGenerator::SampleNeighbors(int64_t *uniq_nodes,
                                    int len,
                                    int sample_size,
                                    std::vector<int> &edges_split_num,
                                    int64_t *neighbor_len) {
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  auto sample_res = gpu_graph_ptr->graph_neighbor_sample_all_edge_type(
      gpuid_,
      edge_to_id_len_,
      reinterpret_cast<uint64_t *>(uniq_nodes),
      sample_size,
      len,
      edge_type_graph_);
  int *all_sample_count_ptr =
      reinterpret_cast<int *>(sample_res.actual_sample_size_mem->ptr());
  auto cumsum_actual_sample_size = memory::Alloc(
      place_,
      (len * edge_to_id_len_ + 1) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *cumsum_actual_sample_size_ptr =
      reinterpret_cast<int *>(cumsum_actual_sample_size->ptr());
  CUDA_CHECK(cudaMemsetAsync(cumsum_actual_sample_size_ptr,
                             0,
                             (len * edge_to_id_len_ + 1) * sizeof(int),
                             sample_stream_));
  // Two-phase CUB scan: first call queries temp storage size, second runs.
  size_t temp_storage_bytes = 0;
  CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
                                           temp_storage_bytes,
                                           all_sample_count_ptr,
                                           cumsum_actual_sample_size_ptr + 1,
                                           len * edge_to_id_len_,
                                           sample_stream_));
  auto d_temp_storage = memory::Alloc(
      place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                           temp_storage_bytes,
                                           all_sample_count_ptr,
                                           cumsum_actual_sample_size_ptr + 1,
                                           len * edge_to_id_len_,
                                           sample_stream_));
  CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
  // The scan result is laid out per edge type in chunks of `len`; the last
  // element of each chunk is the cumulative edge count for that type.
  edges_split_num.resize(edge_to_id_len_);
  for (int i = 0; i < edge_to_id_len_; i++) {
    CUDA_CHECK(cudaMemcpyAsync(edges_split_num.data() + i,
                               cumsum_actual_sample_size_ptr + (i + 1) * len,
                               sizeof(int),
                               cudaMemcpyDeviceToHost,
                               sample_stream_));
  }
  CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
  int all_sample_size = edges_split_num[edge_to_id_len_ - 1];
  auto final_sample_val = memory::AllocShared(
      place_,
      all_sample_size * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  auto final_sample_val_dst = memory::AllocShared(
      place_,
      all_sample_size * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int64_t *final_sample_val_ptr =
      reinterpret_cast<int64_t *>(final_sample_val->ptr());
  int64_t *final_sample_val_dst_ptr =
      reinterpret_cast<int64_t *>(final_sample_val_dst->ptr());
  int64_t *all_sample_val_ptr =
      reinterpret_cast<int64_t *>(sample_res.val_mem->ptr());
  // Compact the fixed-stride sample buffer into dense src/dst arrays using
  // the prefix sums as output offsets.
  FillActualNeighbors<<<GET_BLOCKS(len * edge_to_id_len_),
                        CUDA_NUM_THREADS,
                        0,
                        sample_stream_>>>(all_sample_val_ptr,
                                          final_sample_val_ptr,
                                          final_sample_val_dst_ptr,
                                          all_sample_count_ptr,
                                          cumsum_actual_sample_size_ptr,
                                          sample_size,
                                          len * edge_to_id_len_,
                                          len);
  *neighbor_len = all_sample_size;
  CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
  std::vector<std::shared_ptr<phi::Allocation>> sample_results;
  sample_results.emplace_back(final_sample_val);
  sample_results.emplace_back(final_sample_val_dst);
  return sample_results;
}
// Deduplicates the `num_input` int64 ids in `input` through the device hash
// table (`keys`/`values`/`key_index`, capacity `len_hashtable`). Writes the
// number of unique ids to *final_nodes_len and returns an allocation holding
// the unique ids themselves. All work runs on sample_stream_; CUDA and CUB
// calls are checked with CUDA_CHECK (cub::DeviceScan returns cudaError_t),
// matching the error-handling style used elsewhere in this file.
std::shared_ptr<phi::Allocation> GraphDataGenerator::FillReindexHashTable(
    int64_t *input,
    int num_input,
    int64_t len_hashtable,
    int64_t *keys,
    int *values,
    int *key_index,
    int *final_nodes_len) {
  // Insert all inputs into the hash table.
  phi::BuildHashTable<int64_t>
      <<<GET_BLOCKS(num_input), CUDA_NUM_THREADS, 0, sample_stream_>>>(
          input, num_input, len_hashtable, keys, key_index);
  // Get item index count.
  auto item_count = memory::Alloc(
      place_,
      (num_input + 1) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *item_count_ptr = reinterpret_cast<int *>(item_count->ptr());
  CUDA_CHECK(cudaMemsetAsync(
      item_count_ptr, 0, sizeof(int) * (num_input + 1), sample_stream_));
  phi::GetItemIndexCount<int64_t>
      <<<GET_BLOCKS(num_input), CUDA_NUM_THREADS, 0, sample_stream_>>>(
          input, item_count_ptr, num_input, len_hashtable, keys, key_index);
  // Exclusive scan turns per-item counts into output offsets; the final
  // element then holds the total number of unique items.
  size_t temp_storage_bytes = 0;
  CUDA_CHECK(cub::DeviceScan::ExclusiveSum(NULL,
                                           temp_storage_bytes,
                                           item_count_ptr,
                                           item_count_ptr,
                                           num_input + 1,
                                           sample_stream_));
  auto d_temp_storage = memory::Alloc(
      place_,
      temp_storage_bytes,
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  CUDA_CHECK(cub::DeviceScan::ExclusiveSum(d_temp_storage->ptr(),
                                           temp_storage_bytes,
                                           item_count_ptr,
                                           item_count_ptr,
                                           num_input + 1,
                                           sample_stream_));
  int total_unique_items = 0;
  CUDA_CHECK(cudaMemcpyAsync(&total_unique_items,
                             item_count_ptr + num_input,
                             sizeof(int),
                             cudaMemcpyDeviceToHost,
                             sample_stream_));
  CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
  auto unique_items = memory::AllocShared(
      place_,
      total_unique_items * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int64_t *unique_items_ptr = reinterpret_cast<int64_t *>(unique_items->ptr());
  *final_nodes_len = total_unique_items;
  // Get unique items
  phi::FillUniqueItems<int64_t>
      <<<GET_BLOCKS(num_input), CUDA_NUM_THREADS, 0, sample_stream_>>>(
          input,
          num_input,
          len_hashtable,
          unique_items_ptr,
          item_count_ptr,
          keys,
          values,
          key_index);
  CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
  return unique_items;
}
// Renumbers `reindex_src_data` (neighbor ids) into the compact local id space
// of [center_nodes ++ neighbors]. Resets the persistent reindex table, builds
// the combined node list, fills the hash table via FillReindexHashTable
// (which also sets *final_nodes_len), then rewrites reindex_src_data in place
// with the local ids. Returns the allocation of unique final nodes.
// CUDA calls are wrapped with CUDA_CHECK for consistency with this file.
std::shared_ptr<phi::Allocation> GraphDataGenerator::GetReindexResult(
    int64_t *reindex_src_data,
    int64_t *center_nodes,
    int *final_nodes_len,
    int node_len,
    int64_t neighbor_len) {
  // Reset reindex table
  int64_t *d_reindex_table_key_ptr =
      reinterpret_cast<int64_t *>(d_reindex_table_key_->ptr());
  int *d_reindex_table_value_ptr =
      reinterpret_cast<int *>(d_reindex_table_value_->ptr());
  int *d_reindex_table_index_ptr =
      reinterpret_cast<int *>(d_reindex_table_index_->ptr());
  // Fill table with -1 (byte pattern 0xFF, i.e. all slots "empty").
  CUDA_CHECK(cudaMemsetAsync(d_reindex_table_key_ptr,
                             -1,
                             reindex_table_size_ * sizeof(int64_t),
                             sample_stream_));
  CUDA_CHECK(cudaMemsetAsync(d_reindex_table_value_ptr,
                             -1,
                             reindex_table_size_ * sizeof(int),
                             sample_stream_));
  CUDA_CHECK(cudaMemsetAsync(d_reindex_table_index_ptr,
                             -1,
                             reindex_table_size_ * sizeof(int),
                             sample_stream_));
  // Concatenate center nodes and neighbors: center nodes come first so they
  // receive the lowest local ids.
  auto all_nodes = memory::AllocShared(
      place_,
      (node_len + neighbor_len) * sizeof(int64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int64_t *all_nodes_data = reinterpret_cast<int64_t *>(all_nodes->ptr());
  CUDA_CHECK(cudaMemcpyAsync(all_nodes_data,
                             center_nodes,
                             sizeof(int64_t) * node_len,
                             cudaMemcpyDeviceToDevice,
                             sample_stream_));
  CUDA_CHECK(cudaMemcpyAsync(all_nodes_data + node_len,
                             reindex_src_data,
                             sizeof(int64_t) * neighbor_len,
                             cudaMemcpyDeviceToDevice,
                             sample_stream_));
  CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
  auto final_nodes = FillReindexHashTable(all_nodes_data,
                                          node_len + neighbor_len,
                                          reindex_table_size_,
                                          d_reindex_table_key_ptr,
                                          d_reindex_table_value_ptr,
                                          d_reindex_table_index_ptr,
                                          final_nodes_len);
  // Replace each global neighbor id with its compact local id.
  phi::ReindexSrcOutput<int64_t>
      <<<GET_BLOCKS(neighbor_len), CUDA_NUM_THREADS, 0, sample_stream_>>>(
          reindex_src_data,
          neighbor_len,
          reindex_table_size_,
          d_reindex_table_key_ptr,
          d_reindex_table_value_ptr);
  return final_nodes;
}
// Builds a multi-layer sample graph (GraphSAGE style) rooted at `node_ids`.
// Steps: dedup the input ids (writing the inverse mapping into `inverse`),
// then for each configured layer in samples_ sample neighbors and reindex
// them against the previous layer's node set. Per-layer edges and split
// counts are appended to graph_edges_vec_ / edges_split_num_vec_; *final_len
// receives the node count of the last layer, whose node allocation is
// returned.
std::shared_ptr<phi::Allocation> GraphDataGenerator::GenerateSampleGraph(
    uint64_t *node_ids,
    int len,
    int *final_len,
    std::shared_ptr<phi::Allocation> &inverse) {
  VLOG(2) << "Get Unique Nodes";
  auto uniq_nodes = memory::Alloc(
      place_,
      len * sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *inverse_ptr = reinterpret_cast<int *>(inverse->ptr());
  int64_t *uniq_nodes_data = reinterpret_cast<int64_t *>(uniq_nodes->ptr());
  // Dedup node_ids into uniq_nodes_data; inverse_ptr maps each original
  // position back to its unique index. The d_* members are scratch buffers.
  int uniq_len = dedup_keys_and_fillidx(
      len,
      node_ids,
      reinterpret_cast<uint64_t *>(uniq_nodes_data),
      reinterpret_cast<uint64_t *>(d_sorted_keys_->ptr()),
      reinterpret_cast<uint32_t *>(inverse_ptr),
      reinterpret_cast<uint32_t *>(d_sorted_idx_->ptr()),
      reinterpret_cast<uint32_t *>(d_offset_->ptr()),
      reinterpret_cast<uint32_t *>(d_merged_cnts_->ptr()),
      sample_stream_,
      d_buf_,
      place_);
  int len_samples = samples_.size();
  VLOG(2) << "Sample Neighbors and Reindex";
  std::vector<int> edges_split_num;
  std::vector<std::shared_ptr<phi::Allocation>> final_nodes_vec;
  std::vector<std::shared_ptr<phi::Allocation>> graph_edges;
  std::vector<std::vector<int>> edges_split_num_for_graph;
  std::vector<int> final_nodes_len_vec;
  for (int i = 0; i < len_samples; i++) {
    edges_split_num.clear();
    std::shared_ptr<phi::Allocation> neighbors, reindex_dst;
    int64_t neighbors_len = 0;
    // Layer 0 samples from the deduped input; deeper layers sample from the
    // previous layer's reindexed node set.
    if (i == 0) {
      auto sample_results = SampleNeighbors(uniq_nodes_data,
                                            uniq_len,
                                            samples_[i],
                                            edges_split_num,
                                            &neighbors_len);
      neighbors = sample_results[0];
      reindex_dst = sample_results[1];
      edges_split_num.push_back(uniq_len);
    } else {
      int64_t *final_nodes_data =
          reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
      auto sample_results = SampleNeighbors(final_nodes_data,
                                            final_nodes_len_vec[i - 1],
                                            samples_[i],
                                            edges_split_num,
                                            &neighbors_len);
      neighbors = sample_results[0];
      reindex_dst = sample_results[1];
      edges_split_num.push_back(final_nodes_len_vec[i - 1]);
    }
    int64_t *reindex_src_data = reinterpret_cast<int64_t *>(neighbors->ptr());
    int final_nodes_len = 0;
    // Reindex the sampled neighbors against this layer's center nodes; the
    // result becomes the node set for the next layer.
    if (i == 0) {
      auto tmp_final_nodes = GetReindexResult(reindex_src_data,
                                              uniq_nodes_data,
                                              &final_nodes_len,
                                              uniq_len,
                                              neighbors_len);
      final_nodes_vec.emplace_back(tmp_final_nodes);
      final_nodes_len_vec.emplace_back(final_nodes_len);
    } else {
      int64_t *final_nodes_data =
          reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
      auto tmp_final_nodes = GetReindexResult(reindex_src_data,
                                              final_nodes_data,
                                              &final_nodes_len,
                                              final_nodes_len_vec[i - 1],
                                              neighbors_len);
      final_nodes_vec.emplace_back(tmp_final_nodes);
      final_nodes_len_vec.emplace_back(final_nodes_len);
    }
    edges_split_num.emplace_back(
        final_nodes_len_vec[i]);  // [edges_split_num, next_num_nodes,
                                  // num_nodes]
    edges_split_num.emplace_back(neighbors_len);
    graph_edges.emplace_back(neighbors);
    graph_edges.emplace_back(reindex_dst);
    edges_split_num_for_graph.emplace_back(edges_split_num);
  }
  graph_edges_vec_.emplace_back(graph_edges);
  edges_split_num_vec_.emplace_back(edges_split_num_for_graph);
  *final_len = final_nodes_len_vec[len_samples - 1];
  return final_nodes_vec[len_samples - 1];
}
// Returns a device allocation of len * edge_to_id_len_ ints holding, for
// each node id, its degree under every edge type. The wrapper is invoked
// once per edge type with the shared output allocation.
std::shared_ptr<phi::Allocation> GraphDataGenerator::GetNodeDegree(
    uint64_t *node_ids, int len) {
  auto node_degree = memory::AllocShared(
      place_,
      len * edge_to_id_len_ * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  auto edge_to_id = gpu_graph_ptr->edge_to_id;
  for (const auto &kv : edge_to_id) {
    gpu_graph_ptr->get_node_degree(
        gpuid_, kv.second, node_ids, len, node_degree);
  }
  return node_degree;
}
// Drains the unique-node hash table into host_vec_ (appending after the
// copy_unique_len_ entries already copied) and returns the number of nodes
// copied. A no-op (returns 0) under WHOLE_HBM storage, where everything
// stays in device memory. CUDA calls are checked for consistency with the
// CUDA_CHECK usage elsewhere in this file.
uint64_t GraphDataGenerator::CopyUniqueNodes() {
  if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
    uint64_t h_uniq_node_num = 0;
    uint64_t *d_uniq_node_num =
        reinterpret_cast<uint64_t *>(d_uniq_node_num_->ptr());
    // Fetch the current unique-node count to size the staging buffer.
    CUDA_CHECK(cudaMemcpyAsync(&h_uniq_node_num,
                               d_uniq_node_num,
                               sizeof(uint64_t),
                               cudaMemcpyDeviceToHost,
                               sample_stream_));
    CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
    auto d_uniq_node = memory::AllocShared(
        place_,
        h_uniq_node_num * sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    uint64_t *d_uniq_node_ptr =
        reinterpret_cast<uint64_t *>(d_uniq_node->ptr());
    auto d_node_cursor = memory::AllocShared(
        place_,
        sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    uint64_t *d_node_cursor_ptr =
        reinterpret_cast<uint64_t *>(d_node_cursor->ptr());
    CUDA_CHECK(
        cudaMemsetAsync(d_node_cursor_ptr, 0, sizeof(uint64_t), sample_stream_));
    // uint64_t unused_key = std::numeric_limits<uint64_t>::max();
    table_->get_keys(d_uniq_node_ptr, d_node_cursor_ptr, sample_stream_);
    CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
    host_vec_.resize(h_uniq_node_num + copy_unique_len_);
    CUDA_CHECK(cudaMemcpyAsync(host_vec_.data() + copy_unique_len_,
                               d_uniq_node_ptr,
                               sizeof(uint64_t) * h_uniq_node_num,
                               cudaMemcpyDeviceToHost,
                               sample_stream_));
    CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
    return h_uniq_node_num;
  }
  return 0;
}
// Top-level per-pass driver: fills the walk buffer (train) or the inference
// key buffer (infer), and in sage mode additionally pre-builds one sample
// graph per minibatch, caching results in the *_vec_ members for later
// consumption. sage_batch_num_ counts the batches produced.
void GraphDataGenerator::DoWalkandSage() {
  int device_id = place_.GetDeviceId();
  debug_gpu_memory_info(device_id, "DoWalkandSage start");
  platform::CUDADeviceGuard guard(gpuid_);
  if (gpu_graph_training_) {
    // train
    bool train_flag;
    if (FLAGS_graph_metapath_split_opt) {
      train_flag = FillWalkBufMultiPath();
    } else {
      train_flag = FillWalkBuf();
    }
    if (sage_mode_) {
      sage_batch_num_ = 0;
      if (train_flag) {
        int total_instance = 0, uniq_instance = 0;
        bool ins_pair_flag = true;
        uint64_t *ins_buf, *ins_cursor;
        while (ins_pair_flag) {
          int res = 0;
          // Refill the instance-pair buffer until a full batch is available
          // or the walk data is exhausted (res == -1).
          while (ins_buf_pair_len_ < batch_size_) {
            res = FillInsBuf(sample_stream_);
            if (res == -1) {
              if (ins_buf_pair_len_ == 0) {
                ins_pair_flag = false;
              }
              break;
            }
          }
          if (!ins_pair_flag) {
            break;
          }
          // A pair contributes two instances, hence the *2 below.
          total_instance =
              ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
          total_instance *= 2;
          ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
          // Consume from the tail of the pair buffer.
          ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
          auto inverse = memory::AllocShared(
              place_,
              total_instance * sizeof(int),
              phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
          auto final_sage_nodes = GenerateSampleGraph(
              ins_cursor, total_instance, &uniq_instance, inverse);
          uint64_t *final_sage_nodes_ptr =
              reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
          if (get_degree_) {
            auto node_degrees =
                GetNodeDegree(final_sage_nodes_ptr, uniq_instance);
            node_degree_vec_.emplace_back(node_degrees);
          }
          cudaStreamSynchronize(sample_stream_);
          if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
            // NOTE(review): this declaration shadows the outer
            // final_sage_nodes_ptr with the same value.
            uint64_t *final_sage_nodes_ptr =
                reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
            InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
          }
          // Cache this batch's graph data for later feed.
          final_sage_nodes_vec_.emplace_back(final_sage_nodes);
          inverse_vec_.emplace_back(inverse);
          uniq_instance_vec_.emplace_back(uniq_instance);
          total_instance_vec_.emplace_back(total_instance);
          ins_buf_pair_len_ -= total_instance / 2;
          sage_batch_num_ += 1;
        }
        uint64_t h_uniq_node_num = CopyUniqueNodes();
        VLOG(1) << "train sage_batch_num: " << sage_batch_num_;
      }
    }
  } else {
    // infer
    bool infer_flag = FillInferBuf();
    if (sage_mode_) {
      sage_batch_num_ = 0;
      if (infer_flag) {
        int total_instance = 0, uniq_instance = 0;
        // Batch size is capped by the remaining inference keys; doubled
        // because each key is duplicated into a (src, src) pair.
        total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
                             ? batch_size_
                             : infer_node_end_ - infer_node_start_;
        total_instance *= 2;
        while (total_instance != 0) {
          uint64_t *d_type_keys =
              reinterpret_cast<uint64_t *>(d_device_keys_[cursor_]->ptr());
          d_type_keys += infer_node_start_;
          infer_node_start_ += total_instance / 2;
          auto node_buf = memory::AllocShared(
              place_,
              total_instance * sizeof(uint64_t),
              phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
          int64_t *node_buf_ptr = reinterpret_cast<int64_t *>(node_buf->ptr());
          // Duplicate each key so infer batches share the pair layout that
          // training batches use.
          CopyDuplicateKeys<<<GET_BLOCKS(total_instance / 2),
                              CUDA_NUM_THREADS,
                              0,
                              sample_stream_>>>(
              node_buf_ptr, d_type_keys, total_instance / 2);
          uint64_t *node_buf_ptr_ =
              reinterpret_cast<uint64_t *>(node_buf->ptr());
          auto inverse = memory::AllocShared(
              place_,
              total_instance * sizeof(int),
              phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
          auto final_sage_nodes = GenerateSampleGraph(
              node_buf_ptr_, total_instance, &uniq_instance, inverse);
          uint64_t *final_sage_nodes_ptr =
              reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
          if (get_degree_) {
            auto node_degrees =
                GetNodeDegree(final_sage_nodes_ptr, uniq_instance);
            node_degree_vec_.emplace_back(node_degrees);
          }
          cudaStreamSynchronize(sample_stream_);
          if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
            // NOTE(review): shadows the outer final_sage_nodes_ptr, same value.
            uint64_t *final_sage_nodes_ptr =
                reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
            InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
          }
          final_sage_nodes_vec_.emplace_back(final_sage_nodes);
          inverse_vec_.emplace_back(inverse);
          uniq_instance_vec_.emplace_back(uniq_instance);
          total_instance_vec_.emplace_back(total_instance);
          sage_batch_num_ += 1;
          // Compute the next batch's size; loop ends when no keys remain.
          total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
                               ? batch_size_
                               : infer_node_end_ - infer_node_start_;
          total_instance *= 2;
        }
        uint64_t h_uniq_node_num = CopyUniqueNodes();
        VLOG(1) << "infer sage_batch_num: " << sage_batch_num_;
      }
    }
  }
  debug_gpu_memory_info(device_id, "DoWalkandSage end");
}
// Releases the device-side buffers owned by this generator. table_ is set
// to nullptr after deletion so the pointer does not dangle and a repeated
// call cannot double-free.
void GraphDataGenerator::clear_gpu_mem() {
  d_len_per_row_.reset();
  d_sample_keys_.reset();
  d_prefix_sum_.reset();
  for (size_t i = 0; i < d_sampleidx2rows_.size(); i++) {
    d_sampleidx2rows_[i].reset();
  }
  delete table_;
  table_ = nullptr;  // avoid dangling pointer / double delete
  if (sage_mode_) {
    // Buffers only allocated for sage mode.
    d_reindex_table_key_.reset();
    d_reindex_table_value_.reset();
    d_reindex_table_index_.reset();
    d_sorted_keys_.reset();
    d_sorted_idx_.reset();
    d_offset_.reset();
    d_merged_cnts_.reset();
  }
}
// Selects the next chunk of inference keys for this device. Advances the
// shared infer cursor past exhausted / skipped node types, computes the
// [infer_node_start_, infer_node_end_) window (at most infer_table_cap_
// keys), and in non-sage mode copies the keys straight into host_vec_.
// Returns 1 (a 0 return happens only when all cursors are exhausted).
int GraphDataGenerator::FillInferBuf() {
  platform::CUDADeviceGuard guard(gpuid_);
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  auto &global_infer_node_type_start =
      gpu_graph_ptr->global_infer_node_type_start_[gpuid_];
  auto &infer_cursor = gpu_graph_ptr->infer_cursor_[thread_id_];
  total_row_ = 0;
  if (infer_cursor < h_device_keys_len_.size()) {
    // Current node type fully consumed: move to the next one.
    if (global_infer_node_type_start[infer_cursor] >=
        h_device_keys_len_[infer_cursor]) {
      infer_cursor++;
      if (infer_cursor >= h_device_keys_len_.size()) {
        return 0;
      }
    }
    // Optionally restrict inference to an explicit set of node-type indexes.
    if (!infer_node_type_index_set_.empty()) {
      while (infer_cursor < h_device_keys_len_.size()) {
        if (infer_node_type_index_set_.find(infer_cursor) ==
            infer_node_type_index_set_.end()) {
          VLOG(2) << "Skip cursor[" << infer_cursor << "]";
          infer_cursor++;
          continue;
        } else {
          VLOG(2) << "Not skip cursor[" << infer_cursor << "]";
          break;
        }
      }
      if (infer_cursor >= h_device_keys_len_.size()) {
        return 0;
      }
    }
    size_t device_key_size = h_device_keys_len_[infer_cursor];
    // Take up to infer_table_cap_ keys, or whatever remains.
    total_row_ =
        (global_infer_node_type_start[infer_cursor] + infer_table_cap_ <=
         device_key_size)
            ? infer_table_cap_
            : device_key_size - global_infer_node_type_start[infer_cursor];
    uint64_t *d_type_keys =
        reinterpret_cast<uint64_t *>(d_device_keys_[infer_cursor]->ptr());
    if (!sage_mode_) {
      // Without sage preprocessing the raw keys go directly to the host.
      host_vec_.resize(total_row_);
      cudaMemcpyAsync(host_vec_.data(),
                      d_type_keys + global_infer_node_type_start[infer_cursor],
                      sizeof(uint64_t) * total_row_,
                      cudaMemcpyDeviceToHost,
                      sample_stream_);
      cudaStreamSynchronize(sample_stream_);
    }
    VLOG(1) << "cursor: " << infer_cursor
            << " start: " << global_infer_node_type_start[infer_cursor]
            << " num: " << total_row_;
    // Record the window and advance the shared start offset.
    infer_node_start_ = global_infer_node_type_start[infer_cursor];
    global_infer_node_type_start[infer_cursor] += total_row_;
    infer_node_end_ = global_infer_node_type_start[infer_cursor];
    cursor_ = infer_cursor;
  }
  return 1;
}
// Resets this device's walk-sampling state: forgets which node types have
// finished and rewinds every node type's start offset to zero.
void GraphDataGenerator::ClearSampleState() {
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
  auto &node_type_start = gpu_graph_ptr->node_type_start_[gpuid_];
  finish_node_type.clear();
  for (auto &entry : node_type_start) {
    entry.second = 0;
  }
}
// Fills d_walk_ with random walks for training. Iterates over the configured
// first-node types / meta-paths, sampling walk_degree_ walks of length
// walk_len_ per start node until the buffer is (nearly) full, the epoch
// finishes, or (deepwalk mode) the dedup table fills up. Finally builds a
// shuffled row order in d_random_row_. Returns nonzero iff any rows were
// produced.
int GraphDataGenerator::FillWalkBuf() {
  platform::CUDADeviceGuard guard(gpuid_);
  size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
  ////////
  uint64_t *h_walk;
  uint64_t *h_sample_keys;
  int *h_offset2idx;
  int *h_len_per_row;
  uint64_t *h_prefix_sum;
  // Debug-only host mirrors of the device buffers.
  if (debug_mode_) {
    h_walk = new uint64_t[buf_size_];
    h_sample_keys = new uint64_t[once_max_sample_keynum];
    h_offset2idx = new int[once_max_sample_keynum];
    h_len_per_row = new int[once_max_sample_keynum];
    h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
  }
  ///////
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
  int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
  uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
  cudaMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
  uint8_t *walk_ntype = NULL;
  // Optional per-step node-type track, used to exclude some train pairs.
  if (excluded_train_pair_len_ > 0) {
    walk_ntype = reinterpret_cast<uint8_t *>(d_walk_ntype_->ptr());
    cudaMemsetAsync(walk_ntype, 0, buf_size_ * sizeof(uint8_t), sample_stream_);
  }
  // cudaMemsetAsync(
  //     len_per_row, 0, once_max_sample_keynum * sizeof(int), sample_stream_);
  int sample_times = 0;
  int i = 0;
  total_row_ = 0;
  // Fetch the global sampling state shared across generator threads.
  auto &first_node_type = gpu_graph_ptr->first_node_type_;
  auto &meta_path = gpu_graph_ptr->meta_path_;
  auto &node_type_start = gpu_graph_ptr->node_type_start_[gpuid_];
  auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
  auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
  auto &cursor = gpu_graph_ptr->cursor_[thread_id_];
  size_t node_type_len = first_node_type.size();
  // Leave room for one full sampling round at the end of the buffer.
  int remain_size =
      buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
  int total_samples = 0;
  while (i <= remain_size) {
    // Round-robin over the configured first-node types.
    int cur_node_idx = cursor % node_type_len;
    int node_type = first_node_type[cur_node_idx];
    auto &path = meta_path[cur_node_idx];
    size_t start = node_type_start[node_type];
    VLOG(2) << "cur_node_idx = " << cur_node_idx
            << " meta_path.size = " << meta_path.size();
    // auto node_query_result = gpu_graph_ptr->query_node_list(
    //     gpuid_, node_type, start, once_sample_startid_len_);
    // int tmp_len = node_query_result.actual_sample_size;
    VLOG(2) << "choose start type: " << node_type;
    int type_index = type_to_index[node_type];
    size_t device_key_size = h_device_keys_len_[type_index];
    VLOG(2) << "type: " << node_type << " size: " << device_key_size
            << " start: " << start;
    uint64_t *d_type_keys =
        reinterpret_cast<uint64_t *>(d_device_keys_[type_index]->ptr());
    // Number of start keys available this round for this node type.
    int tmp_len = start + once_sample_startid_len_ > device_key_size
                      ? device_key_size - start
                      : once_sample_startid_len_;
    bool update = true;
    if (tmp_len == 0) {
      // This node type is exhausted; when all types are done the epoch ends.
      finish_node_type.insert(node_type);
      if (finish_node_type.size() == node_type_start.size()) {
        cursor = 0;
        epoch_finish_ = true;
        break;
      }
      cursor += 1;
      continue;
    }
    VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
    uint64_t *cur_walk = walk + i;
    uint8_t *cur_walk_ntype = NULL;
    if (excluded_train_pair_len_ > 0) {
      cur_walk_ntype = walk_ntype + i;
    }
    // Step 1: sample walk_degree_ neighbors per start key along the first
    // edge type of the meta path.
    NeighborSampleQuery q;
    q.initialize(gpuid_,
                 path[0],
                 (uint64_t)(d_type_keys + start),
                 walk_degree_,
                 tmp_len);
    auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
    int step = 1;
    VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
    jump_rows_ = sample_res.total_sample_size;
    total_samples += sample_res.total_sample_size;
    VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
            << " cursor = " << node_type << " cur_node_idx = " << cur_node_idx
            << " jump row: " << jump_rows_;
    VLOG(2) << "jump_row: " << jump_rows_;
    if (jump_rows_ == 0) {
      // No neighbors found: skip these start keys and move on.
      node_type_start[node_type] = tmp_len + start;
      cursor += 1;
      continue;
    }
    // Deepwalk dedup: record keys and sampled nodes; a full table aborts
    // this round without updating the global state.
    if (!sage_mode_) {
      if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
        if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
          VLOG(2) << "in step 0, insert key stage, table is full";
          update = false;
          break;
        }
        if (InsertTable(sample_res.actual_val,
                        sample_res.total_sample_size,
                        d_uniq_node_num_) != 0) {
          VLOG(2) << "in step 0, insert sample res stage, table is full";
          update = false;
          break;
        }
      }
    }
    FillOneStep(d_type_keys + start,
                path[0],
                cur_walk,
                cur_walk_ntype,
                tmp_len,
                sample_res,
                walk_degree_,
                step,
                len_per_row);
    /////////
    if (debug_mode_) {
      cudaMemcpy(
          h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
      for (int xx = 0; xx < buf_size_; xx++) {
        VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
      }
    }
    VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
            << " sample_res_len=" << sample_res.total_sample_size;
    /////////
    step++;
    size_t path_len = path.size();
    // Steps 2..walk_len_: extend each walk one hop at a time, cycling
    // through the meta path's edge types.
    for (; step < walk_len_; step++) {
      if (sample_res.total_sample_size == 0) {
        VLOG(2) << "sample finish, step=" << step;
        break;
      }
      auto sample_key_mem = sample_res.actual_val_mem;
      uint64_t *sample_keys_ptr =
          reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
      int edge_type_id = path[(step - 1) % path_len];
      VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
      q.initialize(gpuid_,
                   edge_type_id,
                   (uint64_t)sample_keys_ptr,
                   1,
                   sample_res.total_sample_size);
      int sample_key_len = sample_res.total_sample_size;
      sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
      total_samples += sample_res.total_sample_size;
      if (!sage_mode_) {
        if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
          if (InsertTable(sample_res.actual_val,
                          sample_res.total_sample_size,
                          d_uniq_node_num_) != 0) {
            VLOG(2) << "in step: " << step << ", table is full";
            update = false;
            break;
          }
        }
      }
      FillOneStep(d_type_keys + start,
                  edge_type_id,
                  cur_walk,
                  cur_walk_ntype,
                  sample_key_len,
                  sample_res,
                  1,
                  step,
                  len_per_row);
      if (debug_mode_) {
        cudaMemcpy(
            h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
        for (int xx = 0; xx < buf_size_; xx++) {
          VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
        }
      }
      VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
              << " sample_res_len=" << sample_res.total_sample_size;
    }
    // Commit this round to the global sampling state (only on success).
    if (update == true) {
      node_type_start[node_type] = tmp_len + start;
      i += jump_rows_ * walk_len_;
      total_row_ += jump_rows_;
      cursor += 1;
      sample_times++;
    } else {
      VLOG(2) << "table is full, not update stat!";
      break;
    }
  }
  buf_state_.Reset(total_row_);
  // Produce a random permutation of row indices for shuffled consumption.
  int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
  thrust::random::default_random_engine engine(shuffle_seed_);
  const auto &exec_policy = thrust::cuda::par.on(sample_stream_);
  thrust::counting_iterator<int> cnt_iter(0);
  thrust::shuffle_copy(exec_policy,
                       cnt_iter,
                       cnt_iter + total_row_,
                       thrust::device_pointer_cast(d_random_row),
                       engine);
  cudaStreamSynchronize(sample_stream_);
  shuffle_seed_ = engine();
  if (debug_mode_) {
    int *h_random_row = new int[total_row_ + 10];
    cudaMemcpy(h_random_row,
               d_random_row,
               total_row_ * sizeof(int),
               cudaMemcpyDeviceToHost);
    for (int xx = 0; xx < total_row_; xx++) {
      VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
    }
    delete[] h_random_row;
    delete[] h_walk;
    delete[] h_sample_keys;
    delete[] h_offset2idx;
    delete[] h_len_per_row;
    delete[] h_prefix_sum;
  }
  if (!sage_mode_) {
    uint64_t h_uniq_node_num = CopyUniqueNodes();
    VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
            << ", d_walk_offset:" << i << ", total_rows:" << total_row_
            << ", total_samples:" << total_samples;
  } else {
    VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
            << ", d_walk_offset:" << i << ", total_rows:" << total_row_
            << ", total_samples:" << total_samples;
  }
  return total_row_ != 0;
}
// Variant of FillWalkBuf for FLAGS_graph_metapath_split_opt: all walks in
// this pass follow the single current meta path (cur_metapath_), starting
// from the pre-gathered d_train_metapath_keys_. Otherwise mirrors
// FillWalkBuf's structure, including the shuffled row permutation at the
// end. Returns nonzero iff any rows were produced.
int GraphDataGenerator::FillWalkBufMultiPath() {
  platform::CUDADeviceGuard guard(gpuid_);
  size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
  ////////
  uint64_t *h_walk;
  uint64_t *h_sample_keys;
  int *h_offset2idx;
  int *h_len_per_row;
  uint64_t *h_prefix_sum;
  // Debug-only host mirrors of the device buffers.
  if (debug_mode_) {
    h_walk = new uint64_t[buf_size_];
    h_sample_keys = new uint64_t[once_max_sample_keynum];
    h_offset2idx = new int[once_max_sample_keynum];
    h_len_per_row = new int[once_max_sample_keynum];
    h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
  }
  ///////
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
  uint8_t *walk_ntype = NULL;
  if (excluded_train_pair_len_ > 0) {
    walk_ntype = reinterpret_cast<uint8_t *>(d_walk_ntype_->ptr());
  }
  int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
  uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
  cudaMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
  int sample_times = 0;
  int i = 0;
  total_row_ = 0;
  // Fetch the global sampling state shared across generator threads.
  auto &first_node_type = gpu_graph_ptr->first_node_type_;
  auto &cur_metapath = gpu_graph_ptr->cur_metapath_;
  auto &meta_path = gpu_graph_ptr->meta_path_;
  auto &path = gpu_graph_ptr->cur_parse_metapath_;
  auto &cur_metapath_start = gpu_graph_ptr->cur_metapath_start_[gpuid_];
  auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
  auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
  size_t node_type_len = first_node_type.size();
  // The start node type is the first segment of the "a2b2c"-style meta path.
  std::string first_node =
      paddle::string::split_string<std::string>(cur_metapath, "2")[0];
  auto it = gpu_graph_ptr->node_to_id.find(first_node);
  auto node_type = it->second;
  // Leave room for one full sampling round at the end of the buffer.
  int remain_size =
      buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
  int total_samples = 0;
  while (i <= remain_size) {
    size_t start = cur_metapath_start;
    size_t device_key_size = h_train_metapath_keys_len_;
    VLOG(2) << "type: " << node_type << " size: " << device_key_size
            << " start: " << start;
    uint64_t *d_type_keys =
        reinterpret_cast<uint64_t *>(d_train_metapath_keys_->ptr());
    // Number of start keys available this round.
    int tmp_len = start + once_sample_startid_len_ > device_key_size
                      ? device_key_size - start
                      : once_sample_startid_len_;
    bool update = true;
    if (tmp_len == 0) {
      break;
    }
    VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
    uint64_t *cur_walk = walk + i;
    uint8_t *cur_walk_ntype = NULL;
    if (excluded_train_pair_len_ > 0) {
      cur_walk_ntype = walk_ntype + i;
    }
    // Step 1: sample walk_degree_ neighbors per start key along the first
    // edge type of the meta path.
    NeighborSampleQuery q;
    q.initialize(gpuid_,
                 path[0],
                 (uint64_t)(d_type_keys + start),
                 walk_degree_,
                 tmp_len);
    auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
    int step = 1;
    VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
    jump_rows_ = sample_res.total_sample_size;
    total_samples += sample_res.total_sample_size;
    VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
            << "jump row: " << jump_rows_;
    if (jump_rows_ == 0) {
      // No neighbors found: skip these start keys and try the next chunk.
      cur_metapath_start = tmp_len + start;
      continue;
    }
    // Deepwalk dedup: record keys and sampled nodes; a full table aborts
    // this round without updating the global state.
    if (!sage_mode_) {
      if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
        if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
          VLOG(2) << "in step 0, insert key stage, table is full";
          update = false;
          break;
        }
        if (InsertTable(sample_res.actual_val,
                        sample_res.total_sample_size,
                        d_uniq_node_num_) != 0) {
          VLOG(2) << "in step 0, insert sample res stage, table is full";
          update = false;
          break;
        }
      }
    }
    FillOneStep(d_type_keys + start,
                path[0],
                cur_walk,
                cur_walk_ntype,
                tmp_len,
                sample_res,
                walk_degree_,
                step,
                len_per_row);
    /////////
    if (debug_mode_) {
      cudaMemcpy(
          h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
      for (int xx = 0; xx < buf_size_; xx++) {
        VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
      }
    }
    VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
            << " sample_res_len=" << sample_res.total_sample_size;
    /////////
    step++;
    size_t path_len = path.size();
    // Steps 2..walk_len_: extend each walk one hop at a time, cycling
    // through the meta path's edge types.
    for (; step < walk_len_; step++) {
      if (sample_res.total_sample_size == 0) {
        VLOG(2) << "sample finish, step=" << step;
        break;
      }
      auto sample_key_mem = sample_res.actual_val_mem;
      uint64_t *sample_keys_ptr =
          reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
      int edge_type_id = path[(step - 1) % path_len];
      VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
      q.initialize(gpuid_,
                   edge_type_id,
                   (uint64_t)sample_keys_ptr,
                   1,
                   sample_res.total_sample_size);
      int sample_key_len = sample_res.total_sample_size;
      sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
      total_samples += sample_res.total_sample_size;
      if (!sage_mode_) {
        if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
          if (InsertTable(sample_res.actual_val,
                          sample_res.total_sample_size,
                          d_uniq_node_num_) != 0) {
            VLOG(2) << "in step: " << step << ", table is full";
            update = false;
            break;
          }
        }
      }
      FillOneStep(d_type_keys + start,
                  edge_type_id,
                  cur_walk,
                  cur_walk_ntype,
                  sample_key_len,
                  sample_res,
                  1,
                  step,
                  len_per_row);
      if (debug_mode_) {
        cudaMemcpy(
            h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
        for (int xx = 0; xx < buf_size_; xx++) {
          VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
        }
      }
      VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
              << " sample_res_len=" << sample_res.total_sample_size;
    }
    // Commit this round to the global sampling state (only on success).
    if (update == true) {
      cur_metapath_start = tmp_len + start;
      i += jump_rows_ * walk_len_;
      total_row_ += jump_rows_;
      sample_times++;
    } else {
      VLOG(2) << "table is full, not update stat!";
      break;
    }
  }
  buf_state_.Reset(total_row_);
  // Produce a random permutation of row indices for shuffled consumption.
  int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
  thrust::random::default_random_engine engine(shuffle_seed_);
  const auto &exec_policy = thrust::cuda::par.on(sample_stream_);
  thrust::counting_iterator<int> cnt_iter(0);
  thrust::shuffle_copy(exec_policy,
                       cnt_iter,
                       cnt_iter + total_row_,
                       thrust::device_pointer_cast(d_random_row),
                       engine);
  cudaStreamSynchronize(sample_stream_);
  shuffle_seed_ = engine();
  if (debug_mode_) {
    int *h_random_row = new int[total_row_ + 10];
    cudaMemcpy(h_random_row,
               d_random_row,
               total_row_ * sizeof(int),
               cudaMemcpyDeviceToHost);
    for (int xx = 0; xx < total_row_; xx++) {
      VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
    }
    delete[] h_random_row;
    delete[] h_walk;
    delete[] h_sample_keys;
    delete[] h_offset2idx;
    delete[] h_len_per_row;
    delete[] h_prefix_sum;
  }
  if (!sage_mode_) {
    uint64_t h_uniq_node_num = CopyUniqueNodes();
    VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
            << ", d_walk_offset:" << i << ", total_rows:" << total_row_
            << ", h_uniq_node_num:" << h_uniq_node_num
            << ", total_samples:" << total_samples;
  } else {
    VLOG(1) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
            << ", d_walk_offset:" << i << ", total_rows:" << total_row_
            << ", total_samples:" << total_samples;
  }
  return total_row_ != 0;
}
// Records the feed tensors this generator fills when producing batches.
// The vector is copied; callers keep ownership of the DenseTensor objects.
void GraphDataGenerator::SetFeedVec(std::vector<phi::DenseTensor *> feed_vec) {
  feed_vec_ = feed_vec;
}
// Binds this generator to the GPU mapped to `thread_id` and allocates every
// device-side buffer used by random-walk sampling: prefix sums, the walk
// buffer, per-sample index maps, shuffle rows, instance buffers, and (in sage
// mode) the reindex/sort scratch tables. All allocations are stream-ordered
// on sample_stream_ and the function synchronizes that stream before
// returning, so buffers are zeroed and ready when it exits.
// `feed_vec` is only inspected for its size (slot layout); it is not stored.
void GraphDataGenerator::AllocResource(
    int thread_id, std::vector<phi::DenseTensor *> feed_vec) {
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  gpuid_ = gpu_graph_ptr->device_id_mapping[thread_id];
  thread_id_ = thread_id;
  place_ = platform::CUDAPlace(gpuid_);
  debug_gpu_memory_info(gpuid_, "AllocResource start");
  platform::CUDADeviceGuard guard(gpuid_);
  // A node-dedup hash table is only needed when the graph is not fully
  // resident in HBM; capacity is scaled by the configured load factor.
  if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
    if (gpu_graph_training_) {
      table_ = new HashTable<uint64_t, uint64_t>(
          train_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
    } else {
      table_ = new HashTable<uint64_t, uint64_t>(
          infer_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
    }
  }
  VLOG(1) << "AllocResource gpuid " << gpuid_
          << " feed_vec.size: " << feed_vec.size()
          << " table cap: " << train_table_cap_;
  sample_stream_ = gpu_graph_ptr->get_local_stream(gpuid_);
  train_stream_ = dynamic_cast<phi::GPUContext *>(
                      platform::DeviceContextPool::Instance().Get(place_))
                      ->stream();
  // feed_vec_ = feed_vec;
  // Derive the slot count from the feed layout. The subtracted constants
  // (3, or 4 + samples_*5 in sage mode) are the fixed non-slot tensors.
  // NOTE(review): assumes feed_vec.size() is large enough — underflow is not
  // guarded here; confirm against the code that builds feed_vec.
  if (!sage_mode_) {
    slot_num_ = (feed_vec.size() - 3) / 2;
  } else {
    slot_num_ = (feed_vec.size() - 4 - samples_.size() * 5) / 2;
  }
  // infer_node_type_start_ = std::vector<int>(h_device_keys_.size(), 0);
  // for (size_t i = 0; i < h_device_keys_.size(); i++) {
  //   for (size_t j = 0; j < h_device_keys_[i]->size(); j++) {
  //     VLOG(3) << "h_device_keys_[" << i << "][" << j
  //             << "] = " << (*(h_device_keys_[i]))[j];
  //   }
  //   auto buf = memory::AllocShared(
  //       place_, h_device_keys_[i]->size() * sizeof(uint64_t));
  //   d_device_keys_.push_back(buf);
  //   CUDA_CHECK(cudaMemcpyAsync(buf->ptr(),
  //                              h_device_keys_[i]->data(),
  //                              h_device_keys_[i]->size() * sizeof(uint64_t),
  //                              cudaMemcpyHostToDevice,
  //                              stream_));
  // }
  // Pick the source of start keys: per-metapath keys when the metapath-split
  // optimization is on for training, otherwise the per-type key lists.
  if (gpu_graph_training_ && FLAGS_graph_metapath_split_opt) {
    d_train_metapath_keys_ =
        gpu_graph_ptr->d_graph_train_total_keys_[thread_id];
    h_train_metapath_keys_len_ =
        gpu_graph_ptr->h_graph_train_keys_len_[thread_id];
    VLOG(2) << "h train metapaths key len: " << h_train_metapath_keys_len_;
  } else {
    auto &d_graph_all_type_keys = gpu_graph_ptr->d_graph_all_type_total_keys_;
    auto &h_graph_all_type_keys_len = gpu_graph_ptr->h_graph_all_type_keys_len_;
    for (size_t i = 0; i < d_graph_all_type_keys.size(); i++) {
      d_device_keys_.push_back(d_graph_all_type_keys[i][thread_id]);
      h_device_keys_len_.push_back(h_graph_all_type_keys_len[i][thread_id]);
    }
    VLOG(2) << "h_device_keys size: " << h_device_keys_len_.size();
  }
  // Maximum number of neighbor keys one sampling round can produce.
  size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
  // Prefix-sum scratch (one extra slot for the exclusive-scan total).
  d_prefix_sum_ = memory::AllocShared(
      place_,
      (once_max_sample_keynum + 1) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  int *d_prefix_sum_ptr = reinterpret_cast<int *>(d_prefix_sum_->ptr());
  cudaMemsetAsync(d_prefix_sum_ptr,
                  0,
                  (once_max_sample_keynum + 1) * sizeof(int),
                  sample_stream_);
  cursor_ = 0;
  jump_rows_ = 0;
  // Single counter of unique nodes inserted into table_.
  d_uniq_node_num_ = memory::AllocShared(
      place_,
      sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  cudaMemsetAsync(d_uniq_node_num_->ptr(), 0, sizeof(uint64_t), sample_stream_);
  // Main walk buffer: buf_size_ node ids, zero = empty slot.
  d_walk_ = memory::AllocShared(
      place_,
      buf_size_ * sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  cudaMemsetAsync(
      d_walk_->ptr(), 0, buf_size_ * sizeof(uint64_t), sample_stream_);
  // When some train pairs are excluded, also track each walk slot's node type
  // so pairs can be filtered later.
  excluded_train_pair_len_ = gpu_graph_ptr->excluded_train_pair_.size();
  if (excluded_train_pair_len_ > 0) {
    d_excluded_train_pair_ = memory::AllocShared(
        place_,
        excluded_train_pair_len_ * sizeof(uint8_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    CUDA_CHECK(cudaMemcpyAsync(d_excluded_train_pair_->ptr(),
                               gpu_graph_ptr->excluded_train_pair_.data(),
                               excluded_train_pair_len_ * sizeof(uint8_t),
                               cudaMemcpyHostToDevice,
                               sample_stream_));
    d_walk_ntype_ = memory::AllocShared(
        place_,
        buf_size_ * sizeof(uint8_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    cudaMemsetAsync(
        d_walk_ntype_->ptr(), 0, buf_size_ * sizeof(uint8_t), sample_stream_);
  }
  // Keys fed to each neighbor-sampling call.
  d_sample_keys_ = memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(uint64_t),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  // Double-buffered sample-index -> row map (ping-pong between steps).
  d_sampleidx2rows_.push_back(memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
  d_sampleidx2rows_.push_back(memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
  cur_sampleidx2row_ = 0;
  d_len_per_row_ = memory::AllocShared(
      place_,
      once_max_sample_keynum * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  // Window offsets used for skip-gram style pair generation:
  // [-window_, -1] then [1, window_] (0 excluded).
  for (int i = -window_; i < 0; i++) {
    window_step_.push_back(i);
  }
  for (int i = 0; i < window_; i++) {
    window_step_.push_back(i + 1);
  }
  buf_state_.Init(batch_size_, walk_len_, &window_step_);
  // Row permutation buffer filled by thrust::shuffle_copy each chunk.
  d_random_row_ = memory::AllocShared(
      place_,
      (once_sample_startid_len_ * walk_degree_ * repeat_time_) * sizeof(int),
      phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  shuffle_seed_ = 0;
  ins_buf_pair_len_ = 0;
  // Instance (pair) buffer: batch_size_*2 pairs of uint64 ids.
  if (!sage_mode_) {
    d_ins_buf_ =
        memory::AllocShared(place_, (batch_size_ * 2 * 2) * sizeof(uint64_t));
    d_pair_num_ = memory::AllocShared(place_, sizeof(int));
  } else {
    d_ins_buf_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_pair_num_ = memory::AllocShared(
        place_,
        sizeof(int),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  }
  // Per-slot device pointer tables (filled later per batch).
  d_slot_tensor_ptr_ =
      memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
  d_slot_lod_tensor_ptr_ =
      memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
  if (sage_mode_) {
    // Size the reindex hash table for the worst-case neighborhood expansion,
    // then round up to the next power of two (doubled) for the open-addressing
    // table.
    reindex_table_size_ = batch_size_ * 2;
    // get hashtable size
    for (int i = 0; i < samples_.size(); i++) {
      reindex_table_size_ *= (samples_[i] * edge_to_id_len_ + 1);
    }
    int64_t next_pow2 =
        1 << static_cast<size_t>(1 + std::log2(reindex_table_size_ >> 1));
    reindex_table_size_ = next_pow2 << 1;
    d_reindex_table_key_ = memory::AllocShared(
        place_,
        reindex_table_size_ * sizeof(int64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_reindex_table_value_ = memory::AllocShared(
        place_,
        reindex_table_size_ * sizeof(int),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_reindex_table_index_ = memory::AllocShared(
        place_,
        reindex_table_size_ * sizeof(int),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    edge_type_graph_ =
        gpu_graph_ptr->get_edge_type_graph(gpuid_, edge_to_id_len_);
    // Scratch buffers for sorting/uniquifying sampled keys.
    d_sorted_keys_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint64_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_sorted_idx_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint32_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_offset_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint32_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
    d_merged_cnts_ = memory::AllocShared(
        place_,
        (batch_size_ * 2 * 2) * sizeof(uint32_t),
        phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
  }
  // parse infer_node_type
  // Inference only: translate the configured ';'-separated node-type names
  // into cursor indices for the type lists to be inferred over.
  auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
  if (!gpu_graph_training_) {
    auto node_types =
        paddle::string::split_string<std::string>(infer_node_type_, ";");
    auto node_to_id = gpu_graph_ptr->node_to_id;
    for (auto &type : node_types) {
      auto iter = node_to_id.find(type);
      PADDLE_ENFORCE_NE(
          iter,
          node_to_id.end(),
          platform::errors::NotFound("(%s) is not found in node_to_id.", type));
      int node_type = iter->second;
      int type_index = type_to_index[node_type];
      VLOG(2) << "add node[" << type
              << "] into infer_node_type, type_index(cursor)[" << type_index
              << "]";
      infer_node_type_index_set_.insert(type_index);
    }
    VLOG(2) << "infer_node_type_index_set_num: "
            << infer_node_type_index_set_.size();
  }
  // Make sure all async memsets/copies above finished before use.
  cudaStreamSynchronize(sample_stream_);
  debug_gpu_memory_info(gpuid_, "AllocResource end");
}
// Allocates the feature-size gather buffers used during training.
// Nothing is allocated when there are no slot features; in sage mode the
// buffers are unused and are explicitly reset to NULL.
void GraphDataGenerator::AllocTrainResource(int thread_id) {
  if (slot_num_ <= 0) {
    return;  // no slot features -> nothing to allocate
  }
  platform::CUDADeviceGuard guard(gpuid_);
  if (sage_mode_) {
    d_feature_size_list_buf_ = NULL;
    d_feature_size_prefixsum_buf_ = NULL;
  } else {
    // One entry per instance (batch_size_ * 2 pairs), plus one extra slot
    // for the prefix-sum total.
    const size_t num_entries = batch_size_ * 2;
    d_feature_size_list_buf_ =
        memory::AllocShared(place_, num_entries * sizeof(uint32_t));
    d_feature_size_prefixsum_buf_ =
        memory::AllocShared(place_, (num_entries + 1) * sizeof(uint32_t));
  }
}
// Reads every graph-sampling knob from the DataFeedDesc's graph_config and
// caches it on this generator, then forwards metapath configuration to the
// global GraphGpuWrapper. Must run before AllocResource.
void GraphDataGenerator::SetConfig(
    const paddle::framework::DataFeedDesc &data_feed_desc) {
  auto conf = data_feed_desc.graph_config();
  walk_degree_ = conf.walk_degree();
  walk_len_ = conf.walk_len();
  window_ = conf.window();
  once_sample_startid_len_ = conf.once_sample_startid_len();
  debug_mode_ = conf.debug_mode();
  gpu_graph_training_ = conf.gpu_graph_training();
  // Normal training derives its batch size from the chunk length; debug and
  // inference use the explicitly configured one.
  batch_size_ = (debug_mode_ || !gpu_graph_training_)
                    ? conf.batch_size()
                    : once_sample_startid_len_;
  repeat_time_ = conf.sample_times_one_chunk();
  // Total walk-buffer capacity: start ids * walk length * degree * repeats.
  buf_size_ =
      once_sample_startid_len_ * walk_len_ * walk_degree_ * repeat_time_;
  train_table_cap_ = conf.train_table_cap();
  infer_table_cap_ = conf.infer_table_cap();
  get_degree_ = conf.get_degree();
  epoch_finish_ = false;
  VLOG(1) << "Confirm GraphConfig, walk_degree : " << walk_degree_
          << ", walk_len : " << walk_len_ << ", window : " << window_
          << ", once_sample_startid_len : " << once_sample_startid_len_
          << ", sample_times_one_chunk : " << repeat_time_
          << ", batch_size: " << batch_size_
          << ", train_table_cap: " << train_table_cap_
          << ", infer_table_cap: " << infer_table_cap_;
  std::string first_node_type = conf.first_node_type();
  std::string meta_path = conf.meta_path();
  sage_mode_ = conf.sage_mode();
  std::string str_samples = conf.samples();
  auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
  debug_gpu_memory_info("init_conf start");
  gpu_graph_ptr->init_conf(
      first_node_type, meta_path, conf.excluded_train_pair());
  debug_gpu_memory_info("init_conf end");
  edge_to_id_len_ = gpu_graph_ptr->edge_to_id.size();
  sage_batch_count_ = 0;
  // Per-hop fanouts, configured as a ';'-separated integer list.
  auto sample_strs =
      paddle::string::split_string<std::string>(str_samples, ";");
  for (const auto &s : sample_strs) {
    samples_.emplace_back(std::stoi(s));
  }
  copy_unique_len_ = 0;
  if (!gpu_graph_training_) {
    infer_node_type_ = conf.infer_node_type();
  }
}
#endif
// Dumps a 1/dump_rate sample of the walk buffer to `dump_path`, one walk per
// line, nodes joined by '-'. Blocking: performs a synchronous device-to-host
// copy of the whole walk buffer first.
// dump_rate must lie in (1, 10000000); enforced below.
void GraphDataGenerator::DumpWalkPath(std::string dump_path, size_t dump_rate) {
#ifdef _LINUX
  PADDLE_ENFORCE_LT(
      dump_rate,
      10000000,
      platform::errors::InvalidArgument(
          "dump_rate can't be large than 10000000. Please check the dump "
          "rate[1, 10000000]"));
  PADDLE_ENFORCE_GT(dump_rate,
                    1,
                    platform::errors::InvalidArgument(
                        "dump_rate can't be less than 1. Please check "
                        "the dump rate[1, 10000000]"));
  int err_no = 0;
  std::shared_ptr<FILE> fp = fs_open_append_write(dump_path, &err_no, "");
  uint64_t *h_walk = new uint64_t[buf_size_];
  uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
  // Synchronous copy: implicitly waits for pending work on the default
  // stream before reading the walk buffer on the host.
  cudaMemcpy(
      h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
  VLOG(1) << "DumpWalkPath all buf_size_:" << buf_size_;
  std::string ss = "";
  size_t write_count = 0;
  // Only the first buf_size_/dump_rate entries are dumped, stepping one walk
  // (walk_len_ nodes) at a time.
  for (int xx = 0; xx < buf_size_ / dump_rate; xx += walk_len_) {
    ss = "";
    for (int yy = 0; yy < walk_len_; yy++) {
      ss += std::to_string(h_walk[xx + yy]) + "-";
    }
    write_count = fwrite_unlocked(ss.data(), 1, ss.length(), fp.get());
    if (write_count != ss.length()) {
      VLOG(1) << "dump walk path" << ss << " failed";
    }
    // Fix: the newline write was previously unchecked while the data write
    // was checked; report short writes consistently.
    write_count = fwrite_unlocked("\n", 1, 1, fp.get());
    if (write_count != 1) {
      VLOG(1) << "dump walk path newline failed";
    }
  }
  // Fix: h_walk was leaked on every call (new[] without delete[]).
  delete[] h_walk;
#endif
}
} // namespace framework
} // namespace paddle
#endif
|
75a6f43014d5a3b3e469693e1b844d373644a820.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
// One narrowing step of a block-parallel search over a sorted array.
// Launched as a single block; each thread owns a contiguous sub-interval of
// [l[0], r[0]] and, if the search value is bracketed by its sub-interval's
// endpoints, publishes that sub-interval back through l/r. The host
// re-launches until l[0] == r[0].
// Preconditions (assumed, not checked): `a` is sorted ascending and the
// value is present — otherwise no thread narrows the range and the host
// loop never terminates. `e` is unused; kept for interface compatibility.
// NOTE(review): with duplicate values two adjacent sub-intervals can both
// match and race on l[0]/r[0]; either winner still brackets the value.
__global__
void bin_search(int* a, int* l, int* r, int* e, int* searchValue) {
  int idx = threadIdx.x;  // single-block launch: thread id == global id
  int lm = l[0];
  int rm = r[0];
  // Ceil-divide the interval among the block's threads (was hard-coded 256;
  // generalized to blockDim.x — identical for the actual <<<1,256>>> launch).
  int gap = (int)ceil((float)(rm - lm + 1) / (float)blockDim.x);
  int currl = idx * gap + lm;          // first index owned by this thread
  if (currl > rm) return;              // no work when interval < blockDim.x
  int currr = min((idx + 1) * gap + lm, rm + 1) - 1;  // last owned index
  if (searchValue[0] >= a[currl] && searchValue[0] <= a[currr]) {
    l[0] = currl;
    r[0] = currr;
  }
}
// Host driver: reads n, the search value, and n integers into unified
// memory, then repeatedly launches bin_search to narrow [l, r] until a
// single index remains, and prints it.
int main(int argc, char* argv[]) {
  int n;
  scanf("%d",&n);  // NOTE(review): scanf results unchecked throughout
  int *a;
  int *searchValue;
  // Unified memory: host fills the arrays, the kernel reads them directly.
  hipMallocManaged(&a, n*sizeof(int));
  hipMallocManaged(&searchValue, sizeof(int));
  scanf("%d",&searchValue[0]);
  for(int i=0;i<n;i++) scanf("%d",&a[i]);  // assumed ascending-sorted input
  int *l, *r;
  int *e;  // unused by the kernel; allocated only to satisfy its signature
  hipMallocManaged(&l, sizeof(int));
  hipMallocManaged(&r, sizeof(int));
  hipMallocManaged(&e, sizeof(int));
  l[0] = 0; r[0] = (n-1);
  // Each launch shrinks [l, r] to the sub-interval bracketing the value.
  // NOTE(review): if searchValue is not present in a, no thread updates
  // l/r once per-thread intervals reach size 1 and this loop spins forever —
  // confirm inputs are guaranteed to contain the value.
  while(l[0] < r[0]) {
    hipLaunchKernelGGL(( bin_search), dim3(1),dim3(256), 0, 0, a,l,r,e,searchValue);
    hipDeviceSynchronize();  // required before the host re-reads l[0]/r[0]
  }
  printf("%d\n",l[0]);  // l == r: the located index
  hipFree(a);
  hipFree(l);
  hipFree(r);
  hipFree(e);
  hipFree(searchValue);
  return 0;
} | 75a6f43014d5a3b3e469693e1b844d373644a820.cu | #include <stdio.h>
#include <time.h>
// One narrowing step of a block-parallel search over a sorted array.
// Launched as a single block; each thread owns a contiguous sub-interval of
// [l[0], r[0]] and, if the search value is bracketed by its sub-interval's
// endpoints, publishes that sub-interval back through l/r. The host
// re-launches until l[0] == r[0].
// Preconditions (assumed, not checked): `a` is sorted ascending and the
// value is present — otherwise no thread narrows the range and the host
// loop never terminates. `e` is unused; kept for interface compatibility.
// NOTE(review): with duplicate values two adjacent sub-intervals can both
// match and race on l[0]/r[0]; either winner still brackets the value.
__global__
void bin_search(int* a, int* l, int* r, int* e, int* searchValue) {
  int idx = threadIdx.x;  // single-block launch: thread id == global id
  int lm = l[0];
  int rm = r[0];
  // Ceil-divide the interval among the block's threads (was hard-coded 256;
  // generalized to blockDim.x — identical for the actual <<<1,256>>> launch).
  int gap = (int)ceil((float)(rm - lm + 1) / (float)blockDim.x);
  int currl = idx * gap + lm;          // first index owned by this thread
  if (currl > rm) return;              // no work when interval < blockDim.x
  int currr = min((idx + 1) * gap + lm, rm + 1) - 1;  // last owned index
  if (searchValue[0] >= a[currl] && searchValue[0] <= a[currr]) {
    l[0] = currl;
    r[0] = currr;
  }
}
// Host driver: reads n, the search value, and n integers into unified
// memory, then repeatedly launches bin_search to narrow [l, r] until a
// single index remains, and prints it.
int main(int argc, char* argv[]) {
  int n;
  scanf("%d",&n);  // NOTE(review): scanf results unchecked throughout
  int *a;
  int *searchValue;
  // Unified memory: host fills the arrays, the kernel reads them directly.
  cudaMallocManaged(&a, n*sizeof(int));
  cudaMallocManaged(&searchValue, sizeof(int));
  scanf("%d",&searchValue[0]);
  for(int i=0;i<n;i++) scanf("%d",&a[i]);  // assumed ascending-sorted input
  int *l, *r;
  int *e;  // unused by the kernel; allocated only to satisfy its signature
  cudaMallocManaged(&l, sizeof(int));
  cudaMallocManaged(&r, sizeof(int));
  cudaMallocManaged(&e, sizeof(int));
  l[0] = 0; r[0] = (n-1);
  // Each launch shrinks [l, r] to the sub-interval bracketing the value.
  // NOTE(review): if searchValue is not present in a, no thread updates
  // l/r once per-thread intervals reach size 1 and this loop spins forever —
  // confirm inputs are guaranteed to contain the value.
  while(l[0] < r[0]) {
    bin_search<<<1,256>>>(a,l,r,e,searchValue);
    cudaDeviceSynchronize();  // required before the host re-reads l[0]/r[0]
  }
  printf("%d\n",l[0]);  // l == r: the located index
  cudaFree(a);
  cudaFree(l);
  cudaFree(r);
  cudaFree(e);
  cudaFree(searchValue);
  return 0;
} |
2206d412ba6fef36df499f5d798937b63efb1546.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <helper_cuda.h>
#include <list>
#include <sstream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <time.h>
#include <iostream>
#define FULL_MASK 0xffffffff
#define NUM_THREADS_PER_BLOCK 512
// Structure-of-arrays view over two parallel coordinate arrays. Does not own
// the storage; it only wraps pointers supplied by the caller.
class Points
{
  float *xs_;
  float *ys_;
public:
  __host__ __device__ Points() : xs_(NULL), ys_(NULL) {}
  __host__ __device__ Points( float *x, float *y ) : xs_(x), ys_(y) {}

  // Rebind this view to a different pair of coordinate arrays.
  __host__ __device__ __forceinline__ void set( float *x, float *y )
  {
    xs_ = x;
    ys_ = y;
  }

  // Read the point stored at index idx.
  __host__ __device__ __forceinline__ float2 getPoint( int idx ) const
  {
    return make_float2( xs_[idx], ys_[idx] );
  }

  // Overwrite the point stored at index idx.
  __host__ __device__ __forceinline__ void setPoint( int idx, const float2 &p )
  {
    xs_[idx] = p.x;
    ys_[idx] = p.y;
  }
};
// Axis-aligned rectangle with half-open membership ([min, max) on each axis).
// Default-constructed boxes span the fixed world extent [-700, 700]^2.
class Bounding_Box{
  float min_x_, max_x_, min_y_, max_y_;
public:
  __host__ __device__ Bounding_Box(){
    min_x_ = -700;
    min_y_ = -700;
    max_x_ = 700;
    max_y_ = 700;
  }
  // Midpoint of the rectangle.
  __host__ __device__ float2 computeCenter(){
    float2 mid;
    mid.x = 0.5f * ( min_x_ + max_x_ );
    mid.y = 0.5f * ( min_y_ + max_y_ );
    return mid;
  }
  __host__ __device__ __forceinline__ float getxMax() const {
    return max_x_;
  }
  __host__ __device__ __forceinline__ float getyMax() const {
    return max_y_;
  }
  __host__ __device__ __forceinline__ float getyMin() const {
    return min_y_;
  }
  __host__ __device__ __forceinline__ float getxMin() const {
    return min_x_;
  }
  // Debug helper: prints the corners as top-left, top-right, bottom-right,
  // bottom-left (same format/order as before).
  __host__ __device__ __forceinline__ void printBox() const {
    printf("%f %f %f %f ", min_x_, max_y_, max_x_, max_y_);
    printf("%f %f %f %f\n", max_x_, min_y_, min_x_ , min_y_ );
  }
  // Half-open test: left/bottom edges inclusive, right/top exclusive.
  __host__ __device__ bool contains(const float2 &p) const {
    return (p.x >= min_x_ && p.y >= min_y_ && p.x < max_x_ && p.y < max_y_);
  }
  // Reset the extents: lowercase args are minima, uppercase are maxima.
  __host__ __device__ void set(float x, float y, float X, float Y){
    min_x_ = x;
    min_y_ = y;
    max_x_ = X;
    max_y_ = Y;
  }
};
// A node of the point-region quadtree: its bounding box, the range of point
// indices it owns in the global points buffer, and its four children
// (NULL while the node is a leaf).
class Quadtree_Node{
  int node_id_;                          // node index; -1 until assigned
  Bounding_Box bounds_;                  // region covered by this node
  int begin_, end_;                      // owned range in the points buffer
  Quadtree_Node *ne_, *nw_, *sw_, *se_;  // children, anticlockwise from NE
public:
  __host__ __device__ Quadtree_Node()
      : node_id_(-1), begin_(-1), end_(-1),
        ne_(NULL), nw_(NULL), sw_(NULL), se_(NULL) {}
  __host__ __device__ bool isNull(){
    return node_id_ == -1;
  }
  __host__ __device__ void setIdx(int idx){
    node_id_ = idx;
  }
  __host__ __device__ int getIdx(){
    return node_id_;
  }
  __host__ __device__ void setBoundingBox(float x,float y,float X,float Y){
    bounds_.set(x, y, X, Y);
  }
  __host__ __device__ __forceinline__ Bounding_Box& getBoundingBox(){
    return bounds_;
  }
  __host__ __device__ void setRange(int s, int e){
    begin_ = s;
    end_ = e;
  }
  __host__ __device__ __forceinline__ Quadtree_Node* getSW(){
    return sw_;
  }
  __host__ __device__ __forceinline__ Quadtree_Node* getSE(){
    return se_;
  }
  __host__ __device__ __forceinline__ Quadtree_Node* getNW(){
    return nw_;
  }
  __host__ __device__ __forceinline__ Quadtree_Node* getNE(){
    return ne_;
  }
  __host__ __device__ __forceinline__ void setSW( Quadtree_Node* ptr){
    sw_ = ptr;
  }
  __host__ __device__ __forceinline__ void setNW( Quadtree_Node* ptr){
    nw_ = ptr;
  }
  __host__ __device__ __forceinline__ void setSE( Quadtree_Node* ptr){
    se_ = ptr;
  }
  __host__ __device__ __forceinline__ void setNE( Quadtree_Node* ptr){
    ne_ = ptr;
  }
  __host__ __device__ __forceinline__ int getStartIdx(){
    return begin_;
  }
  __host__ __device__ __forceinline__ int getEndIdx(){
    return end_;
  }
  // NOTE(review): buildQuadtree iterates [startIdx, endIdx) (exclusive end),
  // so the "+ 1" here looks like an off-by-one — confirm how the root's
  // range is initialised before relying on this count.
  __host__ __device__ __forceinline__ int numberOfPoints(){
    return end_ - begin_ + 1;
  }
};
// Thrust functor producing one pseudo-random 2D point per invocation.
struct Random_generator
{
// Integer bit-mixing hash (shift/xor/add constants) used to decorrelate
// per-thread seeds derived from consecutive thread ids.
__host__ __device__ unsigned int hash(unsigned int a)
{
  a = (a+0x7ed55d16) + (a<<12);
  a = (a^0xc761c23c) ^ (a>>19);
  a = (a+0x165667b1) + (a<<5);
  a = (a+0xd3a2646c) ^ (a<<9);
  a = (a+0xfd7046c5) + (a<<3);
  a = (a^0xb55a4f09) ^ (a>>16);
  return a;
}
// Returns an (x, y) pair; uniform_real_distribution is default-constructed,
// so both coordinates fall in the default range [0, 1).
// NOTE(review): seeds from blockIdx/threadIdx, so this is only meaningful
// when invoked from device code; the same thread id always yields the same
// point (deterministic across runs).
__host__ __device__ __forceinline__ thrust::tuple<float, float> operator()()
{
  unsigned seed = hash( blockIdx.x*blockDim.x + threadIdx.x );
  thrust::default_random_engine rng(seed);
  thrust::random::uniform_real_distribution<float> distrib;
  return thrust::make_tuple( distrib(rng), distrib(rng) );
}
};
// Per-recursion-level parameters passed (by value) to buildQuadtree.
class Parameters
{
public:
  // Recursion stops once a node holds at most this many points.
  const int min_points_per_node;
  // Introduced to minimise shifting of points; can only be 0 or 1.
  // points[points_slot] is the input slot for the current level and
  // points[(points_slot+1)%2] is the output slot.
  int points_slot;
  // Root-level parameters: input starts in slot 0.
  __host__ __device__ Parameters( int mppn ) : min_points_per_node(mppn), points_slot(0) {}
  // Child-level copy: flips the input/output slots. The unnamed bool is a
  // tag that distinguishes this from the (deleted-by-convention) plain copy.
  __host__ __device__ Parameters( Parameters prm, bool ) :
    min_points_per_node(prm.min_points_per_node),
    points_slot((prm.points_slot+1)%2)
  {}
};
// Recursively partitions root's points into a quadtree, one thread block per
// node, using device-side recursion (dynamic parallelism).
// Launch shape: <<<1, NUM_THREADS_PER_BLOCK, 4*NUM_WARPS_PER_BLOCK*sizeof(int)>>>.
// Shared memory holds 4 rows (one per quadrant) of per-warp counters.
// Points ping-pong between points[0] and points[1] (prmtrs.points_slot);
// the leaf branch copies slot 1 back to slot 0 so results end in points[0].
// NOTE(review): uses legacy mask-less warp intrinsics (__any, __ballot,
// __shfl_up) — valid for HIP / pre-Volta CUDA, but these must become the
// *_sync variants on Volta+; confirm target architecture.
// NOTE(review): child nodes are device-malloc'ed and never freed, and there
// is no max-depth guard — more than min_points_per_node duplicate points
// would recurse forever.
__global__ void buildQuadtree( Quadtree_Node *root, Points *points, Parameters prmtrs){
  const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warpSize;
  //shared memory
  extern __shared__ int smem[];
  //warp_id and lane_id
  const int warp_id = threadIdx.x / warpSize;
  const int lane_id = threadIdx.x % warpSize;
  // Addresses of shared Memory: s_num_pts[q][w] = counter for quadrant q, warp w.
  volatile int *s_num_pts[4];
  for( int i = 0 ; i < 4 ; ++i )
    s_num_pts[i] = (volatile int *) &smem[i*NUM_WARPS_PER_BLOCK];
  int NUM_POINTS = root->numberOfPoints();
  Bounding_Box &box = root->getBoundingBox();
  //stop recursion if num_points <= minimum number of points required for recursion
  if( NUM_POINTS <= prmtrs.min_points_per_node){
    //If in current iteration the points are in slot 1
    //shift them to slot 0
    //we want the output in the slot 0
    if(prmtrs.points_slot == 1)
    {
      int it = root->getStartIdx(), end = root->getEndIdx();
      for( it += threadIdx.x; it < end ; it += NUM_THREADS_PER_BLOCK){
        if( it < end )
          points[0].setPoint(it, points[1].getPoint(it));
      }
    }
    return;
  }
  //get Center of the bounding box (quadrant split point)
  float2 center;
  center = box.computeCenter();
  // Split this node's index range into contiguous per-warp chunks.
  int NUM_POINTS_PER_WARP = max( warpSize, ( NUM_POINTS + NUM_WARPS_PER_BLOCK - 1 ) / NUM_WARPS_PER_BLOCK );
  int warp_begin = root->getStartIdx() + warp_id*NUM_POINTS_PER_WARP;
  int warp_end = min(warp_begin + NUM_POINTS_PER_WARP, root->getEndIdx());
  //reset counts of warps (lane 0 owns the warp's counters)
  if( lane_id == 0 )
  {
    s_num_pts[0][warp_id] = 0;
    s_num_pts[1][warp_id] = 0;
    s_num_pts[2][warp_id] = 0;
    s_num_pts[3][warp_id] = 0;
  }
  //input points
  const Points &input = points[prmtrs.points_slot];
  //__any_sync(unsigned mask, predicate):
  //Evaluate predicate for all non-exited threads in mask and return non-zero if and only if predicate evaluates to non-zero for any of them.
  //count points in each warp that belong to which child
  for( int itr = warp_begin + lane_id ; __any(itr < warp_end ) ; itr += warpSize){
    bool is_active = itr < warp_end;
    //get the coordinates of the point
    float2 curP;
    if(is_active)
      curP = input.getPoint(itr);
    else
      curP = make_float2(0.0f, 0.0f);
    //consider standard anticlockwise quadrants for numbering 0 to 3
    //__ballot_sync(unsigned mask, predicate):
    //Evaluate predicate for all non-exited threads in mask and return an integer whose Nth bit is set if and only if predicate evaluates to non-zero for the Nth thread of the warp and the Nth thread is active.
    //__popc
    //Count the number of bits that are set to 1 in a 32 bit integer.
    //top-right Quadrant (Quadrant - I)
    bool pred = is_active && curP.x >= center.x && curP.y >= center.y;
    int curMask = __ballot( pred );
    int cnt = __popc( curMask );
    if( cnt > 0 && lane_id == 0 )
      s_num_pts[0][warp_id] += cnt;
    //top-left Quadrant (Quadrant - II)
    pred = is_active && curP.x < center.x && curP.y >= center.y;
    curMask = __ballot(pred);
    cnt = __popc( curMask );
    if( cnt > 0 && lane_id == 0 )
      s_num_pts[1][warp_id] += cnt;
    //bottom-left Quadrant (Quadrant - III)
    pred = is_active && curP.x < center.x && curP.y < center.y;
    curMask = __ballot(pred);
    cnt = __popc( curMask );
    if( cnt > 0 && lane_id == 0 )
      s_num_pts[2][warp_id] += cnt;
    //bottom-right Quadrant (Quadrant - IV)
    pred = is_active && curP.x >= center.x && curP.y < center.y;
    curMask = __ballot(pred);
    cnt = __popc( curMask );
    if( cnt > 0 && lane_id == 0 )
      s_num_pts[3][warp_id] += cnt;
  }
  //sychronize warps
  //__syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to proceed
  __syncthreads();
  /* debug dump of raw per-warp counters:
  if(threadIdx.x == NUM_THREADS_PER_BLOCK - 1 && root->getIdx() == 1024){
    printf("Quadrant I : %d, %d \n", s_num_pts[0][0], s_num_pts[0][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[0][i]); }
    printf("\nQuadrant II : %d, %d \n", s_num_pts[1][0], s_num_pts[1][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[1][i]); }
    printf("\nQuadrant III : %d, %d \n", s_num_pts[2][0], s_num_pts[2][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[2][i]); }
    printf("\nQuadrant IV : %d, %d \n", s_num_pts[3][0], s_num_pts[3][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[3][i]); }
    printf("\n\n\n");
  }
  __syncthreads(); */
  // Scan the warps' results to know the "global" numbers.
  // First 4 warps scan the numbers of points per child (inclusive scan).
  // In the later code we have used warp id to select the quadrant and lane_id to select a warp.
  if( warp_id < 4 )
  {
    int num_pts = lane_id < NUM_WARPS_PER_BLOCK ? s_num_pts[warp_id][lane_id] : 0;
    #pragma unroll
    for( int offset = 1 ; offset < NUM_WARPS_PER_BLOCK ; offset *= 2 )
    {
      //T __shfl_up_sync(unsigned mask, T var, unsigned int delta, int width=warpSize);
      int n = __shfl_up(num_pts, offset, NUM_WARPS_PER_BLOCK );
      if( lane_id >= offset )
        num_pts += n;
    }
    if( lane_id < NUM_WARPS_PER_BLOCK )
      s_num_pts[warp_id][lane_id] = num_pts;
  }
  //after this we will have the local offsets, i.e , if we have a warp with id X
  //then s_num_pts[0][x] will store the number of points having warp id <= x
  //and belong to the 0th quadrant
  __syncthreads();
  /* debug dump of per-quadrant inclusive scans:
  if(threadIdx.x == NUM_THREADS_PER_BLOCK - 1 && root->getIdx() == 1024){
    printf("Quadrant I : %d, %d \n", s_num_pts[0][0], s_num_pts[0][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[0][i]); }
    printf("\nQuadrant II : %d, %d \n", s_num_pts[1][0], s_num_pts[1][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[1][i]); }
    printf("\nQuadrant III : %d, %d \n", s_num_pts[2][0], s_num_pts[2][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[2][i]); }
    printf("\nQuadrant IV : %d, %d \n", s_num_pts[3][0], s_num_pts[3][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[3][i]); }
    printf("\n\n\n");
  }
  __syncthreads(); */
  // Compute global offsets.
  //here lane_id will index the warps
  if( warp_id == 0 )
  {
    int sum = s_num_pts[0][NUM_WARPS_PER_BLOCK-1];
    for( int row = 1 ; row < 4 ; ++row )
    {
      int tmp = s_num_pts[row][NUM_WARPS_PER_BLOCK-1];
      if( lane_id < NUM_WARPS_PER_BLOCK )
        s_num_pts[row][lane_id] = s_num_pts[row][lane_id] + sum;
      sum += tmp;
    }
  }
  __syncthreads();
  /* debug dump after cross-quadrant accumulation:
  if(threadIdx.x == NUM_THREADS_PER_BLOCK - 1 && root->getIdx() == 1024){
    printf("Quadrant I : %d, %d \n", s_num_pts[0][0], s_num_pts[0][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[0][i]); }
    printf("\nQuadrant II : %d, %d \n", s_num_pts[1][0], s_num_pts[1][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[1][i]); }
    printf("\nQuadrant III : %d, %d \n", s_num_pts[2][0], s_num_pts[2][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[2][i]); }
    printf("\nQuadrant IV : %d, %d \n", s_num_pts[3][0], s_num_pts[3][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[3][i]); }
    printf("\n\n\n");
  }
  __syncthreads(); */
  //after this we have the global offsets, i.e, if warp id is X and quadrant q
  //then s_num_pts[q][x] will store the number of points having warp id <= x
  //and belong to the quadrant <= q
  //make the Scan independent of the quadtree node you are currently in.
  // for this we just have to add the number of points that come before processing of the current node.
  // (inclusive scan -> exclusive scan shifted by the node's start index)
  if(threadIdx.x < 4*NUM_WARPS_PER_BLOCK){
    int val = (threadIdx.x == 0) ? 0 : smem[threadIdx.x - 1];
    smem[threadIdx.x] = val + root->getStartIdx();
  }
  __syncthreads();
  /* debug dump of final write offsets:
  if(threadIdx.x == NUM_THREADS_PER_BLOCK - 1 && root->getIdx() == 1024){
    printf("Quadrant I : %d, %d \n", s_num_pts[0][0], s_num_pts[0][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[0][i]); }
    printf("\nQuadrant II : %d, %d \n", s_num_pts[1][0], s_num_pts[1][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[1][i]); }
    printf("\nQuadrant III : %d, %d \n", s_num_pts[2][0], s_num_pts[2][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[2][i]); }
    printf("\nQuadrant IV : %d, %d \n", s_num_pts[3][0], s_num_pts[3][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[3][i]); }
    printf("\n\n\n");
  }
  __syncthreads(); */
  //move points to the next slot
  Points &output = points[(prmtrs.points_slot+1)%2];
  //Mask for threads in a warp that are less than the current lane_id
  int lane_mask_lt = (1 << lane_id) - 1;
  // Move Points to the appropriate slot
  // Quadtree sort implementation: each matching lane writes to its base
  // offset plus the number of lower lanes that also matched.
  for( int itr = warp_begin + lane_id ; __any(itr < warp_end ) ; itr += warpSize){
    bool is_active = itr < warp_end;
    float2 curP;
    if(is_active){
      curP = input.getPoint(itr);
    }
    else{
      curP = make_float2(0.0f, 0.0f);
    }
    //counting QUADRANT I points
    bool pred =is_active && curP.x >= center.x && curP.y >= center.y;
    int curMask = __ballot(pred);
    int cnt = __popc( curMask & lane_mask_lt );
    int dest = s_num_pts[0][warp_id] + cnt;
    if( pred )
      output.setPoint(dest, curP);
    if( lane_id == 0 )
      s_num_pts[0][warp_id] += __popc( curMask);
    //countin QUADRANT II points
    pred = is_active && curP.x < center.x && curP.y >= center.y;
    curMask = __ballot(pred);
    cnt = __popc(curMask & lane_mask_lt);
    dest = s_num_pts[1][warp_id] + cnt;
    if(pred)
      output.setPoint(dest, curP);
    if( lane_id == 0)
      s_num_pts[1][warp_id] += __popc( curMask );
    //countin QUADRANT III points
    pred = is_active && curP.x < center.x && curP.y < center.y;
    curMask = __ballot(pred);
    cnt = __popc(curMask & lane_mask_lt);
    dest = s_num_pts[2][warp_id] + cnt;
    if(pred)
      output.setPoint(dest, curP);
    if( lane_id == 0)
      s_num_pts[2][warp_id] += __popc( curMask);
    //countin QUADRANT IV points
    pred = is_active && curP.x >= center.x && curP.y < center.y;
    curMask = __ballot(pred);
    cnt = __popc(curMask & lane_mask_lt);
    dest = s_num_pts[3][warp_id] + cnt;
    if(pred)
      output.setPoint(dest, curP);
    if( lane_id == 0)
      s_num_pts[3][warp_id] += __popc( curMask );
  }
  __syncthreads();
  /* debug dump of counters after the scatter:
  if(threadIdx.x == NUM_THREADS_PER_BLOCK - 1 && root->getIdx() == 1024){
    printf("Quadrant I : %d, %d \n", s_num_pts[0][0], s_num_pts[0][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[0][i]); }
    printf("\nQuadrant II : %d, %d \n", s_num_pts[1][0], s_num_pts[1][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[1][i]); }
    printf("\nQuadrant III : %d, %d \n", s_num_pts[2][0], s_num_pts[2][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[2][i]); }
    printf("\nQuadrant IV : %d, %d \n", s_num_pts[3][0], s_num_pts[3][NUM_WARPS_PER_BLOCK-1]);
    for(int i = 0;i<NUM_WARPS_PER_BLOCK;i++){ printf("%d ", s_num_pts[3][i]); }
    printf("\n\n\n");
  }
  __syncthreads(); */
  //last thread will launch new block
  if( threadIdx.x == NUM_THREADS_PER_BLOCK-1){
    //create children for next level
    // set index, bb, startIdx, endIdx and NE, NW, SE, SW children.
    //Index is used just for sake of future extension if some changes are required then
    //children nodes
    // std::cout << "( " << box.getxMin() << "," << box.getyMin() << ") , (" << box.getxMax() << "," << box.getyMax() << ") " << std::endl;
    //print top left and top right points
    // NOTE(review): device-side malloc return values are not NULL-checked;
    // the device heap is limited and allocation failure would crash here.
    Quadtree_Node* NEC = (Quadtree_Node*)malloc(sizeof(Quadtree_Node));
    Quadtree_Node* NWC = (Quadtree_Node*)malloc(sizeof(Quadtree_Node));
    Quadtree_Node* SWC = (Quadtree_Node*)malloc(sizeof(Quadtree_Node));
    Quadtree_Node* SEC = (Quadtree_Node*)malloc(sizeof(Quadtree_Node));
    //set Bounding Box
    //printf("Center: %f %f\n", center.x, center.y);
    NEC->setBoundingBox(center.x, center.y, box.getxMax(), box.getyMax());
    NWC->setBoundingBox(box.getxMin(), center.y, center.x, box.getyMax());
    SWC->setBoundingBox(box.getxMin(), box.getyMin(), center.x, center.y);
    SEC->setBoundingBox(center.x, box.getyMin(), box.getxMax(), center.y);
    //set the start and end ranges
    //print the range of indices for children
    /* printf("(%d, %d), ", root->getStartIdx(), s_num_pts[0][warp_id]);
    printf("(%d, %d), ", s_num_pts[0][warp_id], s_num_pts[1][warp_id]);
    printf("(%d, %d), ", s_num_pts[1][warp_id], s_num_pts[2][warp_id]);
    printf("(%d, %d)\n", s_num_pts[2][warp_id], s_num_pts[3][warp_id]);
    */
    // After the scatter loop, s_num_pts[q][last warp] holds the end offset
    // of quadrant q, so consecutive entries delimit the children's ranges.
    NEC->setRange(root->getStartIdx(), s_num_pts[0][warp_id]);
    NWC->setRange(s_num_pts[0][warp_id], s_num_pts[1][warp_id]);
    SWC->setRange(s_num_pts[1][warp_id], s_num_pts[2][warp_id]);
    SEC->setRange(s_num_pts[2][warp_id], s_num_pts[3][warp_id]);
    //set the root children
    root->setNE(NEC);
    root->setNW(NWC);
    root->setSW(SWC);
    root->setSE(SEC);
    //launch children (dynamic parallelism), flipping the points slot
    hipLaunchKernelGGL(( buildQuadtree), dim3(1), dim3(NUM_THREADS_PER_BLOCK), 4*NUM_WARPS_PER_BLOCK*sizeof(int), 0, NEC, points, Parameters(prmtrs, true));
    hipLaunchKernelGGL(( buildQuadtree), dim3(1), dim3(NUM_THREADS_PER_BLOCK), 4*NUM_WARPS_PER_BLOCK*sizeof(int), 0, NWC, points, Parameters(prmtrs, true));
    hipLaunchKernelGGL(( buildQuadtree), dim3(1), dim3(NUM_THREADS_PER_BLOCK), 4*NUM_WARPS_PER_BLOCK*sizeof(int), 0, SWC, points, Parameters(prmtrs, true));
    hipLaunchKernelGGL(( buildQuadtree), dim3(1), dim3(NUM_THREADS_PER_BLOCK), 4*NUM_WARPS_PER_BLOCK*sizeof(int), 0, SEC, points, Parameters(prmtrs, true));
  }
}
// Pre-order dump of the quadtree: print this node's bounding box, then
// recurse into the four children with nested single-thread launches
// (requires CUDA/HIP dynamic parallelism).
// NOTE(review): this relies on leaf nodes having NULL child pointers, but
// buildQuadtree allocates children with device malloc, which does not run
// the Quadtree_Node constructor — leaves' child pointers may be
// uninitialized garbage; verify before trusting this traversal.
__global__ void printQuadtree( Quadtree_Node *root){
    Bounding_Box box = root->getBoundingBox();
    box.printBox();
    // buildQuadtree sets all four children together, so checking NE suffices.
    if(root->getNE() != NULL){
        hipLaunchKernelGGL(( printQuadtree), dim3(1), dim3(1), 0, 0, root->getNE());
        hipLaunchKernelGGL(( printQuadtree), dim3(1), dim3(1), 0, 0, root->getNW());
        hipLaunchKernelGGL(( printQuadtree), dim3(1), dim3(1), 0, 0, root->getSE());
        hipLaunchKernelGGL(( printQuadtree), dim3(1), dim3(1), 0, 0, root->getSW());
    }
}
/*
bool check_quadtree( const Quadtree_node *nodes, int idx, int num_pts, Points *pts, Parameters params )
{
const Quadtree_node &node = nodes[idx];
int num_points = node.num_points();
if( params.depth == params.max_depth || num_points <= params.min_points_per_node )
{
int num_points_in_children = 0;
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+0].num_points();
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+1].num_points();
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+2].num_points();
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+3].num_points();
if( num_points_in_children != node.num_points() )
return false;
return check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+0, num_pts, pts, Parameters( params, true ) ) &&
check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+1, num_pts, pts, Parameters( params, true ) ) &&
check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+2, num_pts, pts, Parameters( params, true ) ) &&
check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+3, num_pts, pts, Parameters( params, true ) );
}
const Bounding_box &bbox = node.bounding_box();
for( int it = node.points_begin() ; it < node.points_end() ; ++it )
{
if( it >= num_pts )
return false;
float2 p = pts->get_point( it );
if( !bbox.contains( p ) )
return false;
}
return true;
}
*/
// Host driver (HIP build): reads 2-D points from a text file, builds a
// quadtree on the GPU with dynamic parallelism, and prints every node's
// bounding box. All stdout (including the tree dump) is redirected to
// outputFile via freopen.
int main()
{
    //parameters
    std::string inputFile = "2.5width_4patels.txt";
    std::string outputFile = "1200BoundingBox.txt";
    // Redirect stdout so the device-side printf output lands in the file.
    freopen(outputFile.c_str() , "w", stdout);
    const int max_depth = 10;                 // recursion/sync-depth limit
    const int min_points_per_node = 10;       // leaf threshold
    int num_points = -1;
    //Set Cuda Device
    // Pick the first device with compute capability >= 3.5 (dynamic
    // parallelism requirement).
    int device_count = 0, device = -1, warp_size = 0;
    checkCudaErrors( hipGetDeviceCount( &device_count ) );
    for( int i = 0 ; i < device_count ; ++i )
    {
        hipDeviceProp_t properties;
        checkCudaErrors( hipGetDeviceProperties( &properties, i ) );
        if( properties.major > 3 || ( properties.major == 3 && properties.minor >= 5 ) )
        {
            device = i;
            warp_size = properties.warpSize;
            std::cout << "Running on GPU: " << i << " (" << properties.name << ")" << std::endl;
            std::cout << "Warp Size: " << warp_size << std::endl;
            std::cout << "Threads Per Block: " << properties.maxThreadsPerBlock<< std::endl;
            break;
        }
        std::cout << "GPU " << i << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl;
    }
    if( device == -1 )
    {
        //cdpQuadTree requires SM 3.5 or higher to use CUDA Dynamic Parallelism. Exiting...
        exit(EXIT_SUCCESS);
    }
    hipSetDevice(device);
    //Read Points from file and put it into x0(X points) and y0(Y Points)
    // NOTE: clock() measures CPU time, which is adequate here only because
    // these host sections are CPU-bound.
    clock_t start = clock();
    std::list<float> stlX, stlY;
    std::ifstream source(inputFile);
    if(source.is_open()){
        int i = 0;
        for(std::string line;std::getline(source, line); i+=1) //read stream line by line
        {
            // Each line holds one "x y" pair.
            std::istringstream in(line);
            float x, y;
            in >> x >> y;
            stlX.push_back(x);
            stlY.push_back(y);
        }
    }
    else{
        printf("No");
        exit(1);
    }
    clock_t end = clock();
    double run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "File Reading Time: " << run_time << std::endl;
    num_points = stlX.size();
    std::cout << "Number of Points: " << num_points << std::endl;
    // hipFree(0) forces lazy context creation so later timings exclude it.
    start = clock();
    hipFree(0);
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "hipFree Time: " << run_time << std::endl;
    start = clock();
    // Slot 0 holds the input points; slot 1 is scratch of equal size for the
    // ping-pong partitioning in buildQuadtree.
    thrust::device_vector<float> x0( stlX.begin(), stlX.end() );
    thrust::device_vector<float> y0( stlY.begin(), stlY.end() );
    thrust::device_vector<float> x1( num_points );
    thrust::device_vector<float> y1( num_points );
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "Data Conversion Time: " << run_time << std::endl;
    //copy pointers to the points into the device because kernels don't support device_vector as input they accept raw_pointers
    //Thrust data types are not understood by a CUDA kernel and need to be converted back to its underlying pointer.
    //host_points
    Points h_points[2];
    h_points[0].set( thrust::raw_pointer_cast( &x0[0] ), thrust::raw_pointer_cast( &y0[0] ) );
    h_points[1].set( thrust::raw_pointer_cast( &x1[0] ), thrust::raw_pointer_cast( &y1[0] ) );
    //device_points
    Points *d_points;
    checkCudaErrors( hipMalloc( (void**) &d_points, 2*sizeof(Points) ) );
    checkCudaErrors( hipMemcpy( d_points, h_points, 2*sizeof(Points), hipMemcpyHostToDevice ) );
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "GPU Data Transfer Time: " << run_time << std::endl;
    //Setting Cuda Heap size for dynamic memory allocation
    // buildQuadtree allocates tree nodes with device-side malloc; give the
    // device heap 1 GiB so deep trees do not run out.
    size_t size = 1024*1024*1024;
    hipDeviceSetLimit(hipLimitMallocHeapSize, size);
    hipDeviceGetLimit(&size, hipLimitMallocHeapSize);
    //Copy root node from host to device
    // Root covers the half-open range [0, num_points); idx 1024 is an
    // arbitrary debug tag.
    Quadtree_Node h_root;
    h_root.setRange(0, num_points);
    h_root.setIdx(1024);
    Quadtree_Node* d_root;
    checkCudaErrors( hipMalloc( (void**) &d_root, sizeof(Quadtree_Node)));
    checkCudaErrors( hipMemcpy( d_root, &h_root, sizeof(Quadtree_Node), hipMemcpyHostToDevice));
    //set the recursion limit based on max_depth
    //maximum possible depth is 24 levels
    hipDeviceSetLimit( hipLimitDevRuntimeSyncDepth, max_depth );
    Parameters prmtrs( min_points_per_node );
    // Shared memory: 4 int counters (one per quadrant) per warp.
    const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warp_size;
    const int SHARED_MEM_SIZE = 4*NUM_WARPS_PER_BLOCK*sizeof(int);
    start = clock();
    const int NUM_OF_BLOCKS = 1;   // one block per node; children self-launch
    hipLaunchKernelGGL(( buildQuadtree), dim3(NUM_OF_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), SHARED_MEM_SIZE, 0, d_root, d_points, prmtrs);
    hipDeviceSynchronize(); //wait until all threads complete execution
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "Kernel Execution Time: " << run_time << std::endl;
    checkCudaErrors( hipGetLastError() );
    hipLaunchKernelGGL(( printQuadtree), dim3(1),dim3(1), 0, 0, d_root);
    hipDeviceSynchronize();
    return 0;
}
| 2206d412ba6fef36df499f5d798937b63efb1546.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <helper_cuda.h>
#include <list>
#include <sstream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <time.h>
#include <iostream>
#define FULL_MASK 0xffffffff
#define NUM_THREADS_PER_BLOCK 512
// Non-owning structure-of-arrays view of a point set: two parallel float
// buffers holding the x and y coordinates. SoA layout keeps global-memory
// accesses coalesced when consecutive threads touch consecutive indices.
class Points
{
    float *X;
    float *Y;
public:
    __host__ __device__ Points() : X(NULL), Y(NULL) {}
    __host__ __device__ Points( float *x, float *y ) : X(x), Y(y) {}
    // Read point idx as a float2 (no bounds checking).
    __host__ __device__ __forceinline__ float2 getPoint( int idx ) const
    {
        float2 p;
        p.x = X[idx];
        p.y = Y[idx];
        return p;
    }
    // Overwrite point idx (no bounds checking).
    __host__ __device__ __forceinline__ void setPoint( int idx, const float2 &p )
    {
        X[idx] = p.x;
        Y[idx] = p.y;
    }
    // Rebind the view to new coordinate buffers.
    __host__ __device__ __forceinline__ void set( float *x, float *y )
    {
        X = x;
        Y = y;
    }
};
// Axis-aligned rectangle covering [xMin, xMax) x [yMin, yMax).
// Defaults to the fixed world extent (-700, -700) .. (700, 700) used by
// this program.
class Bounding_Box{
    float xMin , xMax, yMin, yMax;
public:
    __host__ __device__ Bounding_Box(){
        xMin = -700;
        yMin = -700;
        xMax = 700;
        yMax = 700;
    }
    // Midpoint of the box.
    __host__ __device__ float2 computeCenter(){
        return make_float2( 0.5f * ( xMin + xMax ), 0.5f * ( yMin + yMax ) );
    }
    __host__ __device__ __forceinline__ float getxMax() const { return xMax; }
    __host__ __device__ __forceinline__ float getyMax() const { return yMax; }
    __host__ __device__ __forceinline__ float getyMin() const { return yMin; }
    __host__ __device__ __forceinline__ float getxMin() const { return xMin; }
    // Print the four corners: top edge first, then bottom edge.
    __host__ __device__ __forceinline__ void printBox() const {
        printf("%f %f %f %f ", xMin, yMax, xMax, yMax);
        printf("%f %f %f %f\n", xMax, yMin, xMin , yMin );
    }
    // Half-open membership test: the max edges are exclusive, so a point on
    // a shared edge belongs to exactly one of two adjacent boxes.
    __host__ __device__ bool contains(const float2 &p) const {
        if (p.x < xMin || p.x >= xMax)
            return false;
        return p.y >= yMin && p.y < yMax;
    }
    // Replace the extent with (x, y) .. (X, Y).
    __host__ __device__ void set(float x, float y, float X, float Y){
        xMin = x;
        xMax = X;
        yMin = y;
        yMax = Y;
    }
};
// One node of the quadtree. Holds the node's bounding box, the half-open
// range [startIdx, endIdx) of its points inside the global point array, and
// pointers to its four children (NULL until buildQuadtree creates them).
class Quadtree_Node{
    //node index; kept only for debugging / future extension (-1 = unset)
    int idx;
    Bounding_Box bb;
    // Half-open range of this node's points in the global data array:
    // the node owns points [startIdx, endIdx). Every traversal in this file
    // iterates `it < endIdx`, and sibling ranges chain end-to-start.
    int startIdx, endIdx;
    Quadtree_Node *NE, *NW, *SW, *SE;
public:
    __host__ __device__ Quadtree_Node() : idx(-1), startIdx(-1), endIdx(-1), NE(NULL), NW(NULL), SW(NULL), SE(NULL){
    }
    // A default-constructed (or never-indexed) node reports as "null".
    __host__ __device__ bool isNull(){
        return (idx == -1);
    }
    __host__ __device__ void setIdx(int idx){
        this->idx = idx;
    }
    __host__ __device__ int getIdx(){
        return idx;
    }
    __host__ __device__ void setBoundingBox(float x,float y,float X,float Y){
        bb.set(x, y, X, Y);
    }
    __host__ __device__ __forceinline__ Bounding_Box& getBoundingBox(){
        return bb;
    }
    // Set the half-open point range [s, e).
    __host__ __device__ void setRange(int s, int e){
        startIdx = s;
        endIdx = e;
    }
    __host__ __device__ __forceinline__ Quadtree_Node* getSW(){
        return SW;
    }
    __host__ __device__ __forceinline__ Quadtree_Node* getSE(){
        return SE;
    }
    __host__ __device__ __forceinline__ Quadtree_Node* getNW(){
        return NW;
    }
    __host__ __device__ __forceinline__ Quadtree_Node* getNE(){
        return NE;
    }
    __host__ __device__ __forceinline__ void setSW( Quadtree_Node* ptr){
        SW = ptr;
    }
    __host__ __device__ __forceinline__ void setNW( Quadtree_Node* ptr){
        NW = ptr;
    }
    __host__ __device__ __forceinline__ void setSE( Quadtree_Node* ptr){
        SE = ptr;
    }
    __host__ __device__ __forceinline__ void setNE( Quadtree_Node* ptr){
        NE = ptr;
    }
    __host__ __device__ __forceinline__ int getStartIdx(){
        return startIdx;
    }
    __host__ __device__ __forceinline__ int getEndIdx(){
        return endIdx;
    }
    __host__ __device__ __forceinline__ int numberOfPoints(){
        // Bug fix: the range is half-open, so the count is endIdx - startIdx.
        // The previous `endIdx - startIdx + 1` overcounted by one point per
        // node — e.g. the root is built with setRange(0, num_points) and must
        // report num_points, not num_points + 1.
        return endIdx - startIdx;
    }
};
// Functor producing pseudo-random (x, y) pairs, suitable for use with
// thrust::generate on the device. Not referenced by the visible code path
// (main() reads points from a file instead); kept for data generation.
struct Random_generator
{
    // Integer avalanche hash (Wang/Jenkins-style bit mixing) used to derive
    // a well-distributed per-thread RNG seed from the thread's global index.
    __host__ __device__ unsigned int hash(unsigned int a)
    {
        a = (a+0x7ed55d16) + (a<<12);
        a = (a^0xc761c23c) ^ (a>>19);
        a = (a+0x165667b1) + (a<<5);
        a = (a+0xd3a2646c) ^ (a<<9);
        a = (a+0xfd7046c5) + (a<<3);
        a = (a^0xb55a4f09) ^ (a>>16);
        return a;
    }
    // Returns one random (x, y) tuple drawn from uniform [0, 1).
    // NOTE(review): reads blockIdx/threadIdx, so despite the __host__
    // qualifier this operator is only meaningful in device code.
    __host__ __device__ __forceinline__ thrust::tuple<float, float> operator()()
    {
        unsigned seed = hash( blockIdx.x*blockDim.x + threadIdx.x );
        thrust::default_random_engine rng(seed);
        thrust::random::uniform_real_distribution<float> distrib;
        return thrust::make_tuple( distrib(rng), distrib(rng) );
    }
};
// Per-launch parameters threaded through the recursive buildQuadtree calls.
class Parameters
{
public:
    // Recursion stops once a node holds at most this many points.
    const int min_points_per_node;
    //Introduced to minimise shifting of points
    //can have values only 0 and 1 based on slot
    //points[points_slot] is input slot
    //points[(points_slot+1)%2] is output slot
    int points_slot;
    // Root-level constructor: the input points start in slot 0.
    __host__ __device__ Parameters( int mppn ) : min_points_per_node(mppn), points_slot(0) {}
    //copy constructor for the evaluation of children of current node
    // (the unnamed bool only distinguishes this from a plain copy);
    // each recursion level ping-pongs the input/output slots.
    __host__ __device__ Parameters( Parameters prm, bool ) :
    min_points_per_node(prm.min_points_per_node),
    points_slot((prm.points_slot+1)%2)
    {}
};
// Builds one quadtree node and recursively launches its four children using
// CUDA dynamic parallelism.
//
// Launch contract (see host code): one block of NUM_THREADS_PER_BLOCK
// threads per node, with 4*NUM_WARPS_PER_BLOCK*sizeof(int) bytes of dynamic
// shared memory. points[0] and points[1] are two equally sized buffers used
// as ping-pong input/output slots (selected by prmtrs.points_slot); the
// fully sorted result always ends up in slot 0. Child nodes live on the
// device heap (sized by the host with cudaDeviceSetLimit) and are never
// freed — the finished tree is intentionally left on the device heap.
//
// Fixes vs. the previous version:
//  * legacy mask-less warp intrinsics (__any/__ballot/__shfl_up) were
//    removed for Volta+; replaced with the *_sync forms plus __syncwarp
//    where the old code relied on implicit warp lockstep,
//  * the exclusive-scan rebase read smem[tid-1] across warp boundaries with
//    no barrier between read and write (data race) — now read, barrier,
//    write,
//  * children were allocated with malloc, which does not run the
//    constructor, so leaves kept garbage child pointers that printQuadtree
//    then dereferenced — now allocated with device-side `new`.
__global__ void buildQuadtree( Quadtree_Node *root, Points *points, Parameters prmtrs){
    const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warpSize;
    // Dynamic shared memory, viewed as 4 rows (one per quadrant) of
    // per-warp counters / running offsets.
    extern __shared__ int smem[];
    const int warp_id = threadIdx.x / warpSize;
    const int lane_id = threadIdx.x % warpSize;
    volatile int *s_num_pts[4];
    for( int i = 0 ; i < 4 ; ++i )
        s_num_pts[i] = (volatile int *) &smem[i*NUM_WARPS_PER_BLOCK];

    int NUM_POINTS = root->numberOfPoints();
    Bounding_Box &box = root->getBoundingBox();

    // Leaf: stop recursing. The sorted output must end up in slot 0, so if
    // this node's points currently live in slot 1, copy them back.
    if( NUM_POINTS <= prmtrs.min_points_per_node){
        if(prmtrs.points_slot == 1)
        {
            int it = root->getStartIdx(), end = root->getEndIdx();
            for( it += threadIdx.x ; it < end ; it += NUM_THREADS_PER_BLOCK)
                points[0].setPoint(it, points[1].getPoint(it));
        }
        return;
    }

    float2 center = box.computeCenter();

    // Split this node's [start, end) range into one contiguous chunk per
    // warp (at least one full warp's worth each).
    int NUM_POINTS_PER_WARP = max( warpSize, ( NUM_POINTS + NUM_WARPS_PER_BLOCK - 1 ) / NUM_WARPS_PER_BLOCK );
    int warp_begin = root->getStartIdx() + warp_id*NUM_POINTS_PER_WARP;
    int warp_end = min(warp_begin + NUM_POINTS_PER_WARP, root->getEndIdx());

    // Reset this warp's counters (each warp touches only its own column, so
    // no block-wide barrier is needed yet).
    if( lane_id == 0 )
    {
        s_num_pts[0][warp_id] = 0;
        s_num_pts[1][warp_id] = 0;
        s_num_pts[2][warp_id] = 0;
        s_num_pts[3][warp_id] = 0;
    }

    const Points &input = points[prmtrs.points_slot];

    // Pass 1: count, per warp, how many of its points fall in each quadrant.
    // Quadrants are numbered counter-clockwise: 0 = NE, 1 = NW, 2 = SW,
    // 3 = SE (half-open split: x >= center.x goes east, y >= center.y north).
    for( int itr = warp_begin + lane_id ; __any_sync(FULL_MASK, itr < warp_end) ; itr += warpSize)
    {
        bool is_active = itr < warp_end;
        float2 curP = is_active ? input.getPoint(itr) : make_float2(0.0f, 0.0f);
        // Map the point to its quadrant index; inactive lanes get quadrant 0
        // but are masked out of every ballot below.
        int quad = (curP.x < center.x) ? 1 : 0;    // NE=0 / NW=1 ...
        if (curP.y < center.y) quad = 3 - quad;    // ... SW=2 / SE=3
        for (int q = 0; q < 4; ++q)
        {
            unsigned int ballot = __ballot_sync(FULL_MASK, is_active && quad == q);
            if( lane_id == 0 )
                s_num_pts[q][warp_id] += __popc(ballot);
        }
    }
    __syncthreads();

    // First 4 warps: inclusive scan of the per-warp counts, warp w scanning
    // quadrant row w, in shuffle segments of NUM_WARPS_PER_BLOCK lanes.
    if( warp_id < 4 )
    {
        int num_pts = lane_id < NUM_WARPS_PER_BLOCK ? s_num_pts[warp_id][lane_id] : 0;
#pragma unroll
        for( int offset = 1 ; offset < NUM_WARPS_PER_BLOCK ; offset *= 2 )
        {
            int n = __shfl_up_sync(FULL_MASK, num_pts, offset, NUM_WARPS_PER_BLOCK);
            if( lane_id >= offset )
                num_pts += n;
        }
        if( lane_id < NUM_WARPS_PER_BLOCK )
            s_num_pts[warp_id][lane_id] = num_pts;
    }
    __syncthreads();

    // Warp 0: turn the four per-quadrant scans into one global scan by
    // adding each quadrant's total to all following rows.
    if( warp_id == 0 )
    {
        int sum = s_num_pts[0][NUM_WARPS_PER_BLOCK-1];
        for( int row = 1 ; row < 4 ; ++row )
        {
            int tmp = s_num_pts[row][NUM_WARPS_PER_BLOCK-1];
            // All lanes must read tmp before lane NUM_WARPS_PER_BLOCK-1
            // overwrites that element below.
            __syncwarp(FULL_MASK);
            if( lane_id < NUM_WARPS_PER_BLOCK )
                s_num_pts[row][lane_id] = s_num_pts[row][lane_id] + sum;
            sum += tmp;
        }
    }
    __syncthreads();

    // Convert the inclusive scan into exclusive destination offsets, rebased
    // onto this node's start index. Thread t reads smem[t-1], which may live
    // in another warp, so: read everything, barrier, then write.
    int scan_val = 0;
    if(threadIdx.x < 4*NUM_WARPS_PER_BLOCK)
        scan_val = (threadIdx.x == 0) ? 0 : smem[threadIdx.x - 1];
    __syncthreads();
    if(threadIdx.x < 4*NUM_WARPS_PER_BLOCK)
        smem[threadIdx.x] = scan_val + root->getStartIdx();
    __syncthreads();

    // Pass 2: scatter each point into the output slot at its quadrant's
    // running offset — a stable quadtree partition of this node's range.
    Points &output = points[(prmtrs.points_slot+1)%2];
    unsigned int lane_mask_lt = (1u << lane_id) - 1;   // lanes below this one
    for( int itr = warp_begin + lane_id ; __any_sync(FULL_MASK, itr < warp_end) ; itr += warpSize)
    {
        bool is_active = itr < warp_end;
        float2 curP = is_active ? input.getPoint(itr) : make_float2(0.0f, 0.0f);
        int quad = (curP.x < center.x) ? 1 : 0;
        if (curP.y < center.y) quad = 3 - quad;
        for (int q = 0; q < 4; ++q)
        {
            bool pred = is_active && quad == q;
            unsigned int ballot = __ballot_sync(FULL_MASK, pred);
            // Destination = warp's current base for this quadrant + rank of
            // this lane among the lanes writing to the same quadrant.
            int dest = s_num_pts[q][warp_id] + __popc(ballot & lane_mask_lt);
            if( pred )
                output.setPoint(dest, curP);
            // All lanes must read the base before lane 0 bumps it, and the
            // bump must be visible before the next read.
            __syncwarp(FULL_MASK);
            if( lane_id == 0 )
                s_num_pts[q][warp_id] += __popc(ballot);
            __syncwarp(FULL_MASK);
        }
    }
    __syncthreads();

    // One thread builds the four children and recurses. After the scatter,
    // s_num_pts[q][NUM_WARPS_PER_BLOCK-1] holds the end offset of quadrant q,
    // and the last thread belongs to the last warp, so its warp_id indexes
    // exactly those totals.
    if( threadIdx.x == NUM_THREADS_PER_BLOCK-1){
        // Device-side `new` (not malloc) so the constructor runs and child
        // pointers start out NULL; nodes are never freed (tree lives on the
        // device heap).
        Quadtree_Node* NEC = new Quadtree_Node();
        Quadtree_Node* NWC = new Quadtree_Node();
        Quadtree_Node* SWC = new Quadtree_Node();
        Quadtree_Node* SEC = new Quadtree_Node();
        NEC->setBoundingBox(center.x, center.y, box.getxMax(), box.getyMax());
        NWC->setBoundingBox(box.getxMin(), center.y, center.x, box.getyMax());
        SWC->setBoundingBox(box.getxMin(), box.getyMin(), center.x, center.y);
        SEC->setBoundingBox(center.x, box.getyMin(), box.getxMax(), center.y);
        // Half-open [start, end) ranges taken from the final scan values.
        NEC->setRange(root->getStartIdx(), s_num_pts[0][warp_id]);
        NWC->setRange(s_num_pts[0][warp_id], s_num_pts[1][warp_id]);
        SWC->setRange(s_num_pts[1][warp_id], s_num_pts[2][warp_id]);
        SEC->setRange(s_num_pts[2][warp_id], s_num_pts[3][warp_id]);
        root->setNE(NEC);
        root->setNW(NWC);
        root->setSW(SWC);
        root->setSE(SEC);
        // Recurse with the ping-pong slots swapped (Parameters copy ctor).
        buildQuadtree<<<1, NUM_THREADS_PER_BLOCK, 4*NUM_WARPS_PER_BLOCK*sizeof(int)>>>(NEC, points, Parameters(prmtrs, true));
        buildQuadtree<<<1, NUM_THREADS_PER_BLOCK, 4*NUM_WARPS_PER_BLOCK*sizeof(int)>>>(NWC, points, Parameters(prmtrs, true));
        buildQuadtree<<<1, NUM_THREADS_PER_BLOCK, 4*NUM_WARPS_PER_BLOCK*sizeof(int)>>>(SWC, points, Parameters(prmtrs, true));
        buildQuadtree<<<1, NUM_THREADS_PER_BLOCK, 4*NUM_WARPS_PER_BLOCK*sizeof(int)>>>(SEC, points, Parameters(prmtrs, true));
    }
}
// Pre-order dump of the quadtree: print this node's bounding box, then
// recurse into the four children, one nested <<<1,1>>> launch per child
// (requires CUDA dynamic parallelism). buildQuadtree sets all four children
// together, so testing a single child pointer identifies a leaf.
__global__ void printQuadtree( Quadtree_Node *root){
    root->getBoundingBox().printBox();
    Quadtree_Node *ne = root->getNE();
    if(ne == NULL)
        return;
    printQuadtree<<<1, 1>>>(ne);
    printQuadtree<<<1, 1>>>(root->getNW());
    printQuadtree<<<1, 1>>>(root->getSE());
    printQuadtree<<<1, 1>>>(root->getSW());
}
/*
bool check_quadtree( const Quadtree_node *nodes, int idx, int num_pts, Points *pts, Parameters params )
{
const Quadtree_node &node = nodes[idx];
int num_points = node.num_points();
if( params.depth == params.max_depth || num_points <= params.min_points_per_node )
{
int num_points_in_children = 0;
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+0].num_points();
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+1].num_points();
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+2].num_points();
num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+3].num_points();
if( num_points_in_children != node.num_points() )
return false;
return check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+0, num_pts, pts, Parameters( params, true ) ) &&
check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+1, num_pts, pts, Parameters( params, true ) ) &&
check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+2, num_pts, pts, Parameters( params, true ) ) &&
check_quadtree( &nodes[params.num_nodes_at_this_level], 4*idx+3, num_pts, pts, Parameters( params, true ) );
}
const Bounding_box &bbox = node.bounding_box();
for( int it = node.points_begin() ; it < node.points_end() ; ++it )
{
if( it >= num_pts )
return false;
float2 p = pts->get_point( it );
if( !bbox.contains( p ) )
return false;
}
return true;
}
*/
// Host driver (CUDA build): reads 2-D points from a text file, builds a
// quadtree on the GPU with dynamic parallelism, and prints every node's
// bounding box. All stdout (including the device printf output) is
// redirected to outputFile via freopen.
int main()
{
    //parameters
    std::string inputFile = "2.5width_4patels.txt";
    std::string outputFile = "1200BoundingBox.txt";
    // Redirect stdout so the tree dump lands in the file.
    freopen(outputFile.c_str() , "w", stdout);
    const int max_depth = 10;                 // recursion/sync-depth limit
    const int min_points_per_node = 10;       // leaf threshold
    int num_points = -1;
    //Set Cuda Device
    // Pick the first device with compute capability >= 3.5 (dynamic
    // parallelism requirement).
    int device_count = 0, device = -1, warp_size = 0;
    checkCudaErrors( cudaGetDeviceCount( &device_count ) );
    for( int i = 0 ; i < device_count ; ++i )
    {
        cudaDeviceProp properties;
        checkCudaErrors( cudaGetDeviceProperties( &properties, i ) );
        if( properties.major > 3 || ( properties.major == 3 && properties.minor >= 5 ) )
        {
            device = i;
            warp_size = properties.warpSize;
            std::cout << "Running on GPU: " << i << " (" << properties.name << ")" << std::endl;
            std::cout << "Warp Size: " << warp_size << std::endl;
            std::cout << "Threads Per Block: " << properties.maxThreadsPerBlock<< std::endl;
            break;
        }
        std::cout << "GPU " << i << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl;
    }
    if( device == -1 )
    {
        //cdpQuadTree requires SM 3.5 or higher to use CUDA Dynamic Parallelism. Exiting...
        exit(EXIT_SUCCESS);
    }
    cudaSetDevice(device);
    //Read Points from file and put it into x0(X points) and y0(Y Points)
    // NOTE: clock() measures CPU time, which is adequate here only because
    // these host sections are CPU-bound.
    clock_t start = clock();
    std::list<float> stlX, stlY;
    std::ifstream source(inputFile);
    if(source.is_open()){
        int i = 0;
        for(std::string line;std::getline(source, line); i+=1) //read stream line by line
        {
            // Each line holds one "x y" pair.
            std::istringstream in(line);
            float x, y;
            in >> x >> y;
            stlX.push_back(x);
            stlY.push_back(y);
        }
    }
    else{
        printf("No");
        exit(1);
    }
    clock_t end = clock();
    double run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "File Reading Time: " << run_time << std::endl;
    num_points = stlX.size();
    std::cout << "Number of Points: " << num_points << std::endl;
    // cudaFree(0) forces lazy context creation so later timings exclude it.
    start = clock();
    cudaFree(0);
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "cudaFree Time: " << run_time << std::endl;
    start = clock();
    // Slot 0 holds the input points; slot 1 is scratch of equal size for the
    // ping-pong partitioning in buildQuadtree.
    thrust::device_vector<float> x0( stlX.begin(), stlX.end() );
    thrust::device_vector<float> y0( stlY.begin(), stlY.end() );
    thrust::device_vector<float> x1( num_points );
    thrust::device_vector<float> y1( num_points );
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "Data Conversion Time: " << run_time << std::endl;
    //copy pointers to the points into the device because kernels don't support device_vector as input they accept raw_pointers
    //Thrust data types are not understood by a CUDA kernel and need to be converted back to its underlying pointer.
    //host_points
    Points h_points[2];
    h_points[0].set( thrust::raw_pointer_cast( &x0[0] ), thrust::raw_pointer_cast( &y0[0] ) );
    h_points[1].set( thrust::raw_pointer_cast( &x1[0] ), thrust::raw_pointer_cast( &y1[0] ) );
    //device_points
    Points *d_points;
    checkCudaErrors( cudaMalloc( (void**) &d_points, 2*sizeof(Points) ) );
    checkCudaErrors( cudaMemcpy( d_points, h_points, 2*sizeof(Points), cudaMemcpyHostToDevice ) );
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "GPU Data Transfer Time: " << run_time << std::endl;
    //Setting Cuda Heap size for dynamic memory allocation
    // buildQuadtree allocates tree nodes on the device heap; give it 1 GiB
    // so deep trees do not run out.
    size_t size = 1024*1024*1024;
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, size);
    cudaDeviceGetLimit(&size, cudaLimitMallocHeapSize);
    //Copy root node from host to device
    // Root covers the half-open range [0, num_points); idx 1024 is an
    // arbitrary debug tag.
    Quadtree_Node h_root;
    h_root.setRange(0, num_points);
    h_root.setIdx(1024);
    Quadtree_Node* d_root;
    checkCudaErrors( cudaMalloc( (void**) &d_root, sizeof(Quadtree_Node)));
    checkCudaErrors( cudaMemcpy( d_root, &h_root, sizeof(Quadtree_Node), cudaMemcpyHostToDevice));
    //set the recursion limit based on max_depth
    //maximum possible depth is 24 levels
    cudaDeviceSetLimit( cudaLimitDevRuntimeSyncDepth, max_depth );
    Parameters prmtrs( min_points_per_node );
    // Shared memory: 4 int counters (one per quadrant) per warp.
    const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warp_size;
    const int SHARED_MEM_SIZE = 4*NUM_WARPS_PER_BLOCK*sizeof(int);
    start = clock();
    const int NUM_OF_BLOCKS = 1;   // one block per node; children self-launch
    buildQuadtree<<<NUM_OF_BLOCKS, NUM_THREADS_PER_BLOCK, SHARED_MEM_SIZE>>>(d_root, d_points, prmtrs);
    cudaDeviceSynchronize(); //wait until all threads complete execution
    end = clock();
    run_time = ((double)(end - start)/CLOCKS_PER_SEC);
    std::cout << "Kernel Execution Time: " << run_time << std::endl;
    checkCudaErrors( cudaGetLastError() );
    printQuadtree<<<1,1>>>(d_root);
    cudaDeviceSynchronize();
    return 0;
}
|
192ea6b7c4e61baaf972bb8b5d9c6ef8a5ede320.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
__device__ volatile int lock = -1;
__device__ volatile int counter = 0;;
// Spinlock stress test: every block serializes on the single global `lock`,
// then each warp leader serializes again on a block-local lock while
// incrementing the global `counter`. Expected total increments:
// gridDim.x * (blockDim.x / 32).
// NOTE(review): spinning in one thread while the rest of the block waits in
// __syncthreads() relies on the scheduler eventually running the lock
// holder; on hardware without independent thread scheduling this pattern
// can livelock.
__global__ void spinlol()
{
    // Block-local lock word; -1 means "free".
    __shared__ int intraCTAlock;
    if (!threadIdx.x && !threadIdx.y)
        intraCTAlock = -1;
    __syncthreads();
    // Thread 0 acquires the global device lock, tagging it with this
    // block's index.
    if (!threadIdx.x && !threadIdx.y)
        while (atomicCAS((int*)&lock, -1, blockIdx.x) != -1);
    __syncthreads();
    // One thread per warp takes the intra-block lock and bumps the counter.
    if (threadIdx.x % 32 == 0)
    {
        while (atomicCAS(&intraCTAlock, -1, 12) != -1);
        counter++;
        __threadfence();   // publish the counter update before releasing
        atomicExch(&intraCTAlock, -1);
    }
    __syncthreads();
    // Thread 0 releases the global lock.
    if (!threadIdx.x && !threadIdx.y)
        atomicExch((int*)&lock, -1);
}
// Launches the spinlock test kernel and reads back the device-side counter.
// 60 blocks x 512 threads with one locked increment per warp leader means
// the expected final counter is 60 * (512 / 32) = 960.
int main(int argc, char** argv)
{
    int hostcounter = -1;
    hipLaunchKernelGGL(( spinlol), dim3(60), dim3(512), 0, 0, );
    hipDeviceSynchronize();
    printf("err = %s\n", hipGetErrorString(hipGetLastError()));
    // Bug fix: hipMemcpyFromSymbol must be given the device symbol itself
    // (wrapped in HIP_SYMBOL), not its name as a C string; the string form
    // is not a valid symbol, so the copy failed and hostcounter stayed -1.
    hipError_t copy_err = hipMemcpyFromSymbol(&hostcounter, HIP_SYMBOL(counter), sizeof(int), 0, hipMemcpyDeviceToHost);
    if (copy_err != hipSuccess)
        printf("copy err = %s\n", hipGetErrorString(copy_err));
    printf("counter = %d\n", hostcounter);
    return 0;
}
| 192ea6b7c4e61baaf972bb8b5d9c6ef8a5ede320.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
__device__ volatile int lock = -1;
__device__ volatile int counter = 0;;
__global__ void spinlol()
{
__shared__ int intraCTAlock;
if (!threadIdx.x && !threadIdx.y)
intraCTAlock = -1;
__syncthreads();
if (!threadIdx.x && !threadIdx.y)
while (atomicCAS((int*)&lock, -1, blockIdx.x) != -1);
__syncthreads();
if (threadIdx.x % 32 == 0)
{
while (atomicCAS(&intraCTAlock, -1, 12) != -1);
counter++;
__threadfence();
atomicExch(&intraCTAlock, -1);
}
__syncthreads();
if (!threadIdx.x && !threadIdx.y)
atomicExch((int*)&lock, -1);
}
int main(int argc, char** argv)
{
int hostcounter = -1;
spinlol<<<60, 512>>>();
cudaThreadSynchronize();
printf("err = %s\n", cudaGetErrorString(cudaGetLastError()));
cudaMemcpyFromSymbol(&hostcounter, "counter", sizeof(int), 0, cudaMemcpyDeviceToHost);
printf("counter = %d\n", hostcounter);
}
|
dee2bca2c8bf273c298243d0f9bdf404e4d3b1a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void nllLoss(float *x, int x_stride, float *y, int* target) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
y[tid] = -1 * x[offset];
} | dee2bca2c8bf273c298243d0f9bdf404e4d3b1a2.cu | #include "includes.h"
__global__ void nllLoss(float *x, int x_stride, float *y, int* target) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
y[tid] = -1 * x[offset];
} |
a4a27805986ce6e25400e28b44155f62cc8c61bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2023, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include <ginkgo/core/base/executor.hpp>
#include <memory>
#include <type_traits>
#include <gtest/gtest.h>
#include <ginkgo/core/base/exception.hpp>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/stream.hpp>
#include "common/cuda_hip/base/executor.hpp.inc"
#include "cuda/base/scoped_device_id.hpp"
#include "cuda/test/utils.hpp"
namespace {
class ExampleOperation : public gko::Operation {
public:
explicit ExampleOperation(int& val) : value(val) {}
void run(std::shared_ptr<const gko::OmpExecutor>) const override
{
value = -1;
}
void run(std::shared_ptr<const gko::ReferenceExecutor>) const override
{
value = -2;
}
void run(std::shared_ptr<const gko::HipExecutor>) const override
{
value = -3;
}
void run(std::shared_ptr<const gko::DpcppExecutor>) const override
{
value = -4;
}
void run(std::shared_ptr<const gko::CudaExecutor>) const override
{
hipGetDevice(&value);
}
int& value;
};
class CudaExecutor : public ::testing::Test {
protected:
CudaExecutor()
:
#ifdef GKO_TEST_NONDEFAULT_STREAM
stream(0),
other_stream(gko::CudaExecutor::get_num_devices() - 1),
#endif
ref(gko::ReferenceExecutor::create()),
cuda(nullptr),
cuda2(nullptr),
cuda3(nullptr)
{}
void SetUp()
{
ASSERT_GT(gko::CudaExecutor::get_num_devices(), 0);
#ifdef GKO_TEST_NONDEFAULT_STREAM
cuda = gko::CudaExecutor::create(
0, ref, std::make_shared<gko::CudaAllocator>(), stream.get());
cuda2 = gko::CudaExecutor::create(
gko::CudaExecutor::get_num_devices() - 1, ref,
std::make_shared<gko::CudaAllocator>(), other_stream.get());
cuda3 = gko::CudaExecutor::create(
0, ref, std::make_shared<gko::CudaUnifiedAllocator>(0),
stream.get());
#else
cuda = gko::CudaExecutor::create(0, ref);
cuda2 = gko::CudaExecutor::create(
gko::CudaExecutor::get_num_devices() - 1, ref);
cuda3 = gko::CudaExecutor::create(
0, ref, std::make_shared<gko::CudaUnifiedAllocator>(0));
#endif
}
void TearDown()
{
if (cuda != nullptr) {
// ensure that previous calls finished and didn't throw an error
ASSERT_NO_THROW(cuda->synchronize());
}
}
#ifdef GKO_TEST_NONDEFAULT_STREAM
gko::cuda_stream stream;
gko::cuda_stream other_stream;
#endif
std::shared_ptr<gko::ReferenceExecutor> ref;
std::shared_ptr<gko::CudaExecutor> cuda;
std::shared_ptr<gko::CudaExecutor> cuda2;
std::shared_ptr<gko::CudaExecutor> cuda3;
};
TEST_F(CudaExecutor, CanInstantiateTwoExecutorsOnOneDevice)
{
auto cuda = gko::CudaExecutor::create(0, ref);
auto cuda2 = gko::CudaExecutor::create(0, ref);
// We want automatic deinitialization to not create any error
}
TEST_F(CudaExecutor, MasterKnowsNumberOfDevices)
{
int count = 0;
hipGetDeviceCount(&count);
auto num_devices = gko::CudaExecutor::get_num_devices();
ASSERT_EQ(count, num_devices);
}
TEST_F(CudaExecutor, AllocatesAndFreesMemory)
{
int* ptr = nullptr;
ASSERT_NO_THROW(ptr = cuda->alloc<int>(2));
ASSERT_NO_THROW(cuda->free(ptr));
}
TEST_F(CudaExecutor, FailsWhenOverallocating)
{
const gko::size_type num_elems = 1ll << 50; // 4PB of integers
int* ptr = nullptr;
ASSERT_THROW(
{
ptr = cuda->alloc<int>(num_elems);
cuda->synchronize();
},
gko::AllocationError);
cuda->free(ptr);
}
__global__ void check_data(int* data)
{
if (data[0] != 3 || data[1] != 8) {
asm("trap;");
}
}
TEST_F(CudaExecutor, CopiesDataToCuda)
{
int orig[] = {3, 8};
auto* copy = cuda->alloc<int>(2);
cuda->copy_from(ref, 2, orig, copy);
hipLaunchKernelGGL(( check_data), dim3(1), dim3(1), 0, cuda->get_stream(), copy);
ASSERT_NO_THROW(cuda->synchronize());
cuda->free(copy);
}
__global__ void check_data2(int* data)
{
if (data[0] != 4 || data[1] != 8) {
asm("trap;");
}
}
TEST_F(CudaExecutor, CanAllocateOnUnifiedMemory)
{
int orig[] = {3, 8};
auto* copy = cuda3->alloc<int>(2);
cuda3->copy_from(ref, 2, orig, copy);
hipLaunchKernelGGL(( check_data), dim3(1), dim3(1), 0, cuda3->get_stream(), copy);
ASSERT_NO_THROW(cuda3->synchronize());
copy[0] = 4;
hipLaunchKernelGGL(( check_data2), dim3(1), dim3(1), 0, cuda3->get_stream(), copy);
cuda3->free(copy);
}
__global__ void init_data(int* data)
{
data[0] = 3;
data[1] = 8;
}
TEST_F(CudaExecutor, CopiesDataFromCuda)
{
int copy[2];
auto orig = cuda->alloc<int>(2);
hipLaunchKernelGGL(( init_data), dim3(1), dim3(1), 0, cuda->get_stream(), orig);
ref->copy_from(cuda, 2, orig, copy);
EXPECT_EQ(3, copy[0]);
ASSERT_EQ(8, copy[1]);
cuda->free(orig);
}
/* Properly checks if it works only when multiple GPUs exist */
TEST_F(CudaExecutor, PreservesDeviceSettings)
{
auto previous_device = gko::CudaExecutor::get_num_devices() - 1;
GKO_ASSERT_NO_CUDA_ERRORS(hipSetDevice(previous_device));
auto orig = cuda->alloc<int>(2);
int current_device;
GKO_ASSERT_NO_CUDA_ERRORS(hipGetDevice(¤t_device));
ASSERT_EQ(current_device, previous_device);
cuda->free(orig);
GKO_ASSERT_NO_CUDA_ERRORS(hipGetDevice(¤t_device));
ASSERT_EQ(current_device, previous_device);
}
TEST_F(CudaExecutor, RunsOnProperDevice)
{
int value = -1;
GKO_ASSERT_NO_CUDA_ERRORS(hipSetDevice(0));
cuda2->run(ExampleOperation(value));
ASSERT_EQ(value, cuda2->get_device_id());
}
TEST_F(CudaExecutor, CopiesDataFromCudaToCuda)
{
int copy[2];
auto orig = cuda->alloc<int>(2);
GKO_ASSERT_NO_CUDA_ERRORS(hipSetDevice(0));
hipLaunchKernelGGL(( init_data), dim3(1), dim3(1), 0, cuda->get_stream(), orig);
auto copy_cuda2 = cuda2->alloc<int>(2);
cuda2->copy_from(cuda, 2, orig, copy_cuda2);
// Check that the data is really on GPU2 and ensure we did not cheat
int value = -1;
GKO_ASSERT_NO_CUDA_ERRORS(hipSetDevice(cuda2->get_device_id()));
hipLaunchKernelGGL(( check_data), dim3(1), dim3(1), 0, cuda2->get_stream(), copy_cuda2);
GKO_ASSERT_NO_CUDA_ERRORS(hipSetDevice(0));
cuda2->run(ExampleOperation(value));
ASSERT_EQ(value, cuda2->get_device_id());
// Put the results on OpenMP and run CPU side assertions
ref->copy_from(cuda2, 2, copy_cuda2, copy);
EXPECT_EQ(3, copy[0]);
ASSERT_EQ(8, copy[1]);
cuda2->free(copy_cuda2);
cuda->free(orig);
}
TEST_F(CudaExecutor, Synchronizes)
{
// Todo design a proper unit test once we support streams
ASSERT_NO_THROW(cuda->synchronize());
}
TEST_F(CudaExecutor, ExecInfoSetsCorrectProperties)
{
auto dev_id = cuda->get_device_id();
auto num_sm = 0;
auto major = 0;
auto minor = 0;
auto max_threads_per_block = 0;
auto warp_size = 0;
GKO_ASSERT_NO_CUDA_ERRORS(hipDeviceGetAttribute(
&num_sm, hipDeviceAttributeMultiprocessorCount, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(hipDeviceGetAttribute(
&major, hipDeviceAttributeComputeCapabilityMajor, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(hipDeviceGetAttribute(
&minor, hipDeviceAttributeComputeCapabilityMinor, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(hipDeviceGetAttribute(
&max_threads_per_block, hipDeviceAttributeMaxThreadsPerBlock, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(
hipDeviceGetAttribute(&warp_size, hipDeviceAttributeWarpSize, dev_id));
auto num_cores = convert_sm_ver_to_cores(major, minor);
ASSERT_EQ(cuda->get_major_version(), major);
ASSERT_EQ(cuda->get_minor_version(), minor);
ASSERT_EQ(cuda->get_num_multiprocessor(), num_sm);
ASSERT_EQ(cuda->get_warp_size(), warp_size);
ASSERT_EQ(cuda->get_num_warps(), num_sm * (num_cores / warp_size));
ASSERT_EQ(cuda->get_num_warps_per_sm(), num_cores / warp_size);
}
} // namespace
| a4a27805986ce6e25400e28b44155f62cc8c61bb.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2023, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include <ginkgo/core/base/executor.hpp>
#include <memory>
#include <type_traits>
#include <gtest/gtest.h>
#include <ginkgo/core/base/exception.hpp>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/stream.hpp>
#include "common/cuda_hip/base/executor.hpp.inc"
#include "cuda/base/scoped_device_id.hpp"
#include "cuda/test/utils.hpp"
namespace {
class ExampleOperation : public gko::Operation {
public:
explicit ExampleOperation(int& val) : value(val) {}
void run(std::shared_ptr<const gko::OmpExecutor>) const override
{
value = -1;
}
void run(std::shared_ptr<const gko::ReferenceExecutor>) const override
{
value = -2;
}
void run(std::shared_ptr<const gko::HipExecutor>) const override
{
value = -3;
}
void run(std::shared_ptr<const gko::DpcppExecutor>) const override
{
value = -4;
}
void run(std::shared_ptr<const gko::CudaExecutor>) const override
{
cudaGetDevice(&value);
}
int& value;
};
class CudaExecutor : public ::testing::Test {
protected:
CudaExecutor()
:
#ifdef GKO_TEST_NONDEFAULT_STREAM
stream(0),
other_stream(gko::CudaExecutor::get_num_devices() - 1),
#endif
ref(gko::ReferenceExecutor::create()),
cuda(nullptr),
cuda2(nullptr),
cuda3(nullptr)
{}
void SetUp()
{
ASSERT_GT(gko::CudaExecutor::get_num_devices(), 0);
#ifdef GKO_TEST_NONDEFAULT_STREAM
cuda = gko::CudaExecutor::create(
0, ref, std::make_shared<gko::CudaAllocator>(), stream.get());
cuda2 = gko::CudaExecutor::create(
gko::CudaExecutor::get_num_devices() - 1, ref,
std::make_shared<gko::CudaAllocator>(), other_stream.get());
cuda3 = gko::CudaExecutor::create(
0, ref, std::make_shared<gko::CudaUnifiedAllocator>(0),
stream.get());
#else
cuda = gko::CudaExecutor::create(0, ref);
cuda2 = gko::CudaExecutor::create(
gko::CudaExecutor::get_num_devices() - 1, ref);
cuda3 = gko::CudaExecutor::create(
0, ref, std::make_shared<gko::CudaUnifiedAllocator>(0));
#endif
}
void TearDown()
{
if (cuda != nullptr) {
// ensure that previous calls finished and didn't throw an error
ASSERT_NO_THROW(cuda->synchronize());
}
}
#ifdef GKO_TEST_NONDEFAULT_STREAM
gko::cuda_stream stream;
gko::cuda_stream other_stream;
#endif
std::shared_ptr<gko::ReferenceExecutor> ref;
std::shared_ptr<gko::CudaExecutor> cuda;
std::shared_ptr<gko::CudaExecutor> cuda2;
std::shared_ptr<gko::CudaExecutor> cuda3;
};
TEST_F(CudaExecutor, CanInstantiateTwoExecutorsOnOneDevice)
{
auto cuda = gko::CudaExecutor::create(0, ref);
auto cuda2 = gko::CudaExecutor::create(0, ref);
// We want automatic deinitialization to not create any error
}
TEST_F(CudaExecutor, MasterKnowsNumberOfDevices)
{
int count = 0;
cudaGetDeviceCount(&count);
auto num_devices = gko::CudaExecutor::get_num_devices();
ASSERT_EQ(count, num_devices);
}
TEST_F(CudaExecutor, AllocatesAndFreesMemory)
{
int* ptr = nullptr;
ASSERT_NO_THROW(ptr = cuda->alloc<int>(2));
ASSERT_NO_THROW(cuda->free(ptr));
}
TEST_F(CudaExecutor, FailsWhenOverallocating)
{
const gko::size_type num_elems = 1ll << 50; // 4PB of integers
int* ptr = nullptr;
ASSERT_THROW(
{
ptr = cuda->alloc<int>(num_elems);
cuda->synchronize();
},
gko::AllocationError);
cuda->free(ptr);
}
__global__ void check_data(int* data)
{
if (data[0] != 3 || data[1] != 8) {
asm("trap;");
}
}
TEST_F(CudaExecutor, CopiesDataToCuda)
{
int orig[] = {3, 8};
auto* copy = cuda->alloc<int>(2);
cuda->copy_from(ref, 2, orig, copy);
check_data<<<1, 1, 0, cuda->get_stream()>>>(copy);
ASSERT_NO_THROW(cuda->synchronize());
cuda->free(copy);
}
__global__ void check_data2(int* data)
{
if (data[0] != 4 || data[1] != 8) {
asm("trap;");
}
}
TEST_F(CudaExecutor, CanAllocateOnUnifiedMemory)
{
int orig[] = {3, 8};
auto* copy = cuda3->alloc<int>(2);
cuda3->copy_from(ref, 2, orig, copy);
check_data<<<1, 1, 0, cuda3->get_stream()>>>(copy);
ASSERT_NO_THROW(cuda3->synchronize());
copy[0] = 4;
check_data2<<<1, 1, 0, cuda3->get_stream()>>>(copy);
cuda3->free(copy);
}
__global__ void init_data(int* data)
{
data[0] = 3;
data[1] = 8;
}
TEST_F(CudaExecutor, CopiesDataFromCuda)
{
int copy[2];
auto orig = cuda->alloc<int>(2);
init_data<<<1, 1, 0, cuda->get_stream()>>>(orig);
ref->copy_from(cuda, 2, orig, copy);
EXPECT_EQ(3, copy[0]);
ASSERT_EQ(8, copy[1]);
cuda->free(orig);
}
/* Properly checks if it works only when multiple GPUs exist */
TEST_F(CudaExecutor, PreservesDeviceSettings)
{
auto previous_device = gko::CudaExecutor::get_num_devices() - 1;
GKO_ASSERT_NO_CUDA_ERRORS(cudaSetDevice(previous_device));
auto orig = cuda->alloc<int>(2);
int current_device;
GKO_ASSERT_NO_CUDA_ERRORS(cudaGetDevice(¤t_device));
ASSERT_EQ(current_device, previous_device);
cuda->free(orig);
GKO_ASSERT_NO_CUDA_ERRORS(cudaGetDevice(¤t_device));
ASSERT_EQ(current_device, previous_device);
}
TEST_F(CudaExecutor, RunsOnProperDevice)
{
int value = -1;
GKO_ASSERT_NO_CUDA_ERRORS(cudaSetDevice(0));
cuda2->run(ExampleOperation(value));
ASSERT_EQ(value, cuda2->get_device_id());
}
TEST_F(CudaExecutor, CopiesDataFromCudaToCuda)
{
int copy[2];
auto orig = cuda->alloc<int>(2);
GKO_ASSERT_NO_CUDA_ERRORS(cudaSetDevice(0));
init_data<<<1, 1, 0, cuda->get_stream()>>>(orig);
auto copy_cuda2 = cuda2->alloc<int>(2);
cuda2->copy_from(cuda, 2, orig, copy_cuda2);
// Check that the data is really on GPU2 and ensure we did not cheat
int value = -1;
GKO_ASSERT_NO_CUDA_ERRORS(cudaSetDevice(cuda2->get_device_id()));
check_data<<<1, 1, 0, cuda2->get_stream()>>>(copy_cuda2);
GKO_ASSERT_NO_CUDA_ERRORS(cudaSetDevice(0));
cuda2->run(ExampleOperation(value));
ASSERT_EQ(value, cuda2->get_device_id());
// Put the results on OpenMP and run CPU side assertions
ref->copy_from(cuda2, 2, copy_cuda2, copy);
EXPECT_EQ(3, copy[0]);
ASSERT_EQ(8, copy[1]);
cuda2->free(copy_cuda2);
cuda->free(orig);
}
TEST_F(CudaExecutor, Synchronizes)
{
// Todo design a proper unit test once we support streams
ASSERT_NO_THROW(cuda->synchronize());
}
TEST_F(CudaExecutor, ExecInfoSetsCorrectProperties)
{
auto dev_id = cuda->get_device_id();
auto num_sm = 0;
auto major = 0;
auto minor = 0;
auto max_threads_per_block = 0;
auto warp_size = 0;
GKO_ASSERT_NO_CUDA_ERRORS(cudaDeviceGetAttribute(
&num_sm, cudaDevAttrMultiProcessorCount, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(cudaDeviceGetAttribute(
&major, cudaDevAttrComputeCapabilityMajor, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(cudaDeviceGetAttribute(
&minor, cudaDevAttrComputeCapabilityMinor, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(cudaDeviceGetAttribute(
&max_threads_per_block, cudaDevAttrMaxThreadsPerBlock, dev_id));
GKO_ASSERT_NO_CUDA_ERRORS(
cudaDeviceGetAttribute(&warp_size, cudaDevAttrWarpSize, dev_id));
auto num_cores = convert_sm_ver_to_cores(major, minor);
ASSERT_EQ(cuda->get_major_version(), major);
ASSERT_EQ(cuda->get_minor_version(), minor);
ASSERT_EQ(cuda->get_num_multiprocessor(), num_sm);
ASSERT_EQ(cuda->get_warp_size(), warp_size);
ASSERT_EQ(cuda->get_num_warps(), num_sm * (num_cores / warp_size));
ASSERT_EQ(cuda->get_num_warps_per_sm(), num_cores / warp_size);
}
} // namespace
|
3b3a5e34e7d859f6922e8fa8bba12806ad4939de.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************
**
** SNOW - CS224 BROWN UNIVERSITY
**
** simulation.cu
** Authors: evjang, mliberma, taparson, wyegelwe
** Created: 17 Apr 2014
**
**************************************************************************/
#define CUDA_INCLUDE
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "math.h"
#include "sim/caches.h"
#include "sim/implicitcollider.h"
#include "sim/material.h"
#include "sim/particle.h"
#include "sim/particlegridnode.h"
#include "common/math.h"
#include "cuda/helpers.h"
#include "cuda/atomic.h"
#include "cuda/collider.h"
#include "cuda/decomposition.h"
#include "cuda/implicit.h"
#include "cuda/weighting.h"
#include "cuda/functions.h"
#define ALPHA 0.05f
#define GRAVITY vec3(0.f,-9.8f,0.f)
// Chain to compute the volume of the particle
/**
* Part of one time operation to compute particle volumes. First rasterize particle masses to grid
*
* Operation done over Particles over grid node particle affects
*/
__global__ void computeNodeMasses( const Particle *particles, int numParticles, const Grid *grid, float *nodeMasses )
{
int particleIdx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
const Particle &particle = particles[particleIdx];
glm::ivec3 currIJK;
Grid::gridIndexToIJK( threadIdx.y, glm::ivec3(4,4,4), currIJK );
vec3 particleGridPos = (particle.position - grid->pos) / grid->h;
currIJK += glm::ivec3(particleGridPos-1);
if ( Grid::withinBoundsInclusive(currIJK, glm::ivec3(0,0,0), grid->dim) ) {
vec3 nodePosition(currIJK);
vec3 dx = vec3::abs( particleGridPos - nodePosition );
float w = weight( dx );
atomicAdd( &nodeMasses[Grid::getGridIndex(currIJK, grid->dim+1)], particle.mass*w );
}
}
/**
* Computes the particle's density * grid's volume. This needs to be separate from computeCellMasses(...) because
* we need to wait for ALL threads to sync before computing the density
*
* Operation done over Particles over grid node particle affects
*/
__global__ void computeParticleDensity( Particle *particles, int numParticles, const Grid *grid, const float *cellMasses )
{
int particleIdx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
Particle &particle = particles[particleIdx];
glm::ivec3 currIJK;
Grid::gridIndexToIJK( threadIdx.y, glm::ivec3(4,4,4), currIJK );
vec3 particleGridPos = ( particle.position - grid->pos ) / grid->h;
currIJK += glm::ivec3(particleGridPos-1);
if ( Grid::withinBoundsInclusive(currIJK, glm::ivec3(0,0,0), grid->dim) ) {
vec3 nodePosition(currIJK);
vec3 dx = vec3::abs( particleGridPos - nodePosition );
float w = weight( dx );
float gridVolume = grid->h * grid->h * grid->h;
atomicAdd( &particle.volume, cellMasses[Grid::getGridIndex(currIJK, grid->dim+1)] * w / gridVolume ); //fill volume with particle density. Then in final step, compute volume
}
}
/**
* Computes the particle's volume. Assumes computeParticleDensity(...) has just been called.
*
* Operation done over particles
*/
__global__ void computeParticleVolume( Particle *particleData, int numParticles )
{
int particleIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
Particle &particle = particleData[particleIdx];
particle.volume = particle.mass / particle.volume; // Note: particle.volume is assumed to be the (particle's density ) before we compute it correctly
}
__host__ void initializeParticleVolumes( Particle *particles, int numParticles, const Grid *grid, int numNodes )
{
float *devNodeMasses;
checkCudaErrors( hipMalloc( (void**)&devNodeMasses, numNodes*sizeof(float) ) );
hipMemset( devNodeMasses, 0, numNodes*sizeof(float) );
const dim3 blocks( (numParticles+THREAD_COUNT-1)/THREAD_COUNT, 64 );
static const dim3 threads( THREAD_COUNT / 64, 64 );
LAUNCH(hipLaunchKernelGGL(( computeNodeMasses), dim3(blocks),dim3(threads), 0, 0, particles,numParticles,grid,devNodeMasses) );
LAUNCH(hipLaunchKernelGGL(( computeParticleDensity), dim3(blocks),dim3(threads), 0, 0, particles,numParticles,grid,devNodeMasses) );
LAUNCH(hipLaunchKernelGGL(( computeParticleVolume), dim3((numParticles+THREAD_COUNT-1)/THREAD_COUNT),dim3(THREAD_COUNT), 0, 0, particles,numParticles) );
checkCudaErrors( hipFree(devNodeMasses) );
}
__global__ void computeSigma( const Particle *particles, ParticleCache *particleCache, int numParticles, const Grid *grid )
{
int particleIdx = blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
const Particle &particle = particles[particleIdx];
const mat3 &Fp = particle.plasticF; //for the sake of making the code look like the math
const mat3 &Fe = particle.elasticF;
float Jpp = mat3::determinant(Fp);
float Jep = mat3::determinant(Fe);
mat3 Re;
computePD( Fe, Re );
const Material material = particle.material;
float muFp = material.mu*expf(material.xi*(1-Jpp));
float lambdaFp = material.lambda*expf(material.xi*(1-Jpp));
particleCache->sigmas[particleIdx] = (2*muFp*mat3::multiplyABt(Fe-Re, Fe) + mat3(lambdaFp*(Jep-1)*Jep)) * -particle.volume;
}
/**
* Called on each particle.
*
* Each particle adds it's mass, velocity and force contribution to the grid nodes within 2h of itself.
*
* In:
* particleData -- list of particles
* grid -- Stores grid paramters
* worldParams -- Global parameters dealing with the physics of the world
*
* Out:
* nodes -- list of every node in grid ((dim.x+1)*(dim.y+1)*(dim.z+1))
*
*/
__global__ void computeCellMassVelocityAndForceFast( const Particle *particleData, const ParticleCache *particleCache, int numParticles, const Grid *grid, Node *nodes )
{
int particleIdx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
const Particle &particle = particleData[particleIdx];
glm::ivec3 currIJK;
Grid::gridIndexToIJK(threadIdx.y, glm::ivec3(4,4,4), currIJK);
vec3 particleGridPos = (particle.position-grid->pos)/grid->h;
currIJK += glm::ivec3( particleGridPos-1 );
if ( Grid::withinBoundsInclusive(currIJK, glm::ivec3(0,0,0), grid->dim) ) {
Node &node = nodes[Grid::getGridIndex(currIJK, grid->dim+1)];
float w;
vec3 wg;
vec3 nodePosition(currIJK.x, currIJK.y, currIJK.z);
weightAndGradient( particleGridPos - nodePosition, w, wg );
atomicAdd( &node.mass, particle.mass*w );
atomicAdd( &node.velocity, particle.velocity*particle.mass*w );
atomicAdd( &node.force, particleCache->sigmas[particleIdx]*wg );
}
}
/**
* Called on each grid node.
*
* Updates the velocities of each grid node based on forces and collisions
*
* In:
* nodes -- list of all nodes in the grid.
* dt -- delta time, time step of simulation
* colliders -- array of colliders in the scene.
* numColliders -- number of colliders in the scene
* worldParams -- Global parameters dealing with the physics of the world
* grid -- parameters defining the grid
*
* Out:
* nodes -- updated velocity and velocityChange
*
*/
__global__ void updateNodeVelocities( Node *nodes, int numNodes, float dt, const ImplicitCollider* colliders, int numColliders, const Grid *grid, bool updateVelocityChange )
{
int nodeIdx = blockIdx.x*blockDim.x + threadIdx.x;
if ( nodeIdx >= numNodes ) return;
Node &node = nodes[nodeIdx];
if ( node.mass > 0.f ) {
// Have to normalize velocity by mass to conserve momentum
float scale = 1.f / node.mass;
node.velocity *= scale;
// Initialize velocityChange with pre-update velocity
node.velocityChange = node.velocity;
// Gravity for node forces
node.force += node.mass * GRAVITY;
// Update velocity with node force
node.velocity += dt * scale * node.force;
// Handle collisions
int gridI, gridJ, gridK;
Grid::gridIndexToIJK( nodeIdx, gridI, gridJ, gridK, grid->dim+1 );
vec3 nodePosition = vec3(gridI, gridJ, gridK)*grid->h + grid->pos;
checkForAndHandleCollisions( colliders, numColliders, nodePosition, node.velocity );
if ( updateVelocityChange ) node.velocityChange = node.velocity - node.velocityChange;
}
}
// Use weighting functions to compute particle velocity gradient and update particle velocity
__device__ void processGridVelocities( Particle &particle, const Grid *grid, const Node *nodes, mat3 &velocityGradient )
{
const vec3 &pos = particle.position;
const glm::ivec3 &dim = grid->dim;
const float h = grid->h;
// Compute neighborhood of particle in grid
vec3 particleGridPos = (pos - grid->pos) / h,
gridMax = vec3::floor( particleGridPos + vec3(2,2,2) ),
gridMin = vec3::ceil( particleGridPos - vec3(2,2,2) );
glm::ivec3 maxIndex = glm::clamp( glm::ivec3(gridMax), glm::ivec3(0,0,0), dim ),
minIndex = glm::clamp( glm::ivec3(gridMin), glm::ivec3(0,0,0), dim );
// For computing particle velocity gradient:
// grad(v_p) = sum( v_i * transpose(grad(w_ip)) ) = [3x3 matrix]
// For updating particle velocity:
// v_PIC = sum( v_i * w_ip )
// v_FLIP = v_p + sum( dv_i * w_ip )
// v = (1-alpha)*v_PIC _ alpha*v_FLIP
vec3 v_PIC(0,0,0), dv_FLIP(0,0,0);
int rowSize = dim.z+1;
int pageSize = (dim.y+1)*rowSize;
for ( int i = minIndex.x; i <= maxIndex.x; ++i ) {
vec3 d, s;
d.x = particleGridPos.x - i;
d.x *= ( s.x = ( d.x < 0 ) ? -1.f : 1.f );
int pageOffset = i*pageSize;
for ( int j = minIndex.y; j <= maxIndex.y; ++j ) {
d.y = particleGridPos.y - j;
d.y *= ( s.y = ( d.y < 0 ) ? -1.f : 1.f );
int rowOffset = pageOffset + j*rowSize;
for ( int k = minIndex.z; k <= maxIndex.z; ++k ) {
d.z = particleGridPos.z - k;
d.z *= ( s.z = ( d.z < 0 ) ? -1.f : 1.f );
const Node &node = nodes[rowOffset+k];
float w;
vec3 wg;
weightAndGradient( s, d, w, wg );
velocityGradient += mat3::outerProduct( node.velocity, wg );
// Particle velocities
v_PIC += node.velocity * w;
dv_FLIP += node.velocityChange * w;
}
}
}
particle.velocity = (1.f-ALPHA)*v_PIC + ALPHA*(particle.velocity+dv_FLIP);
}
__device__ void updateParticleDeformationGradients( Particle &particle, const mat3 &velocityGradient, float timeStep )
{
// Temporarily assign all deformation to elastic portion
particle.elasticF = mat3::addIdentity( timeStep*velocityGradient ) * particle.elasticF;
const Material &material = particle.material;
// Clamp the singular values
mat3 W, S, Sinv, V;
computeSVD( particle.elasticF, W, S, V );
// FAST COMPUTATION:
S = mat3( CLAMP( S[0], material.criticalCompressionRatio, material.criticalStretchRatio ), 0.f, 0.f,
0.f, CLAMP( S[4], material.criticalCompressionRatio, material.criticalStretchRatio ), 0.f,
0.f, 0.f, CLAMP( S[8], material.criticalCompressionRatio, material.criticalStretchRatio ) );
Sinv = mat3( 1.f/S[0], 0.f, 0.f,
0.f, 1.f/S[4], 0.f,
0.f, 0.f, 1.f/S[8] );
particle.plasticF = mat3::multiplyADBt( V, Sinv, W ) * particle.elasticF * particle.plasticF;
particle.elasticF = mat3::multiplyADBt( W, S, V );
// // MORE ACCURATE COMPUTATION:
// S[0] = CLAMP( S[0], material->criticalCompressionRatio, material->criticalStretchRatio );
// S[4] = CLAMP( S[4], material->criticalCompressionRatio, material->criticalStretchRatio );
// S[8] = CLAMP( S[8], material->criticalCompressionRatio, material->criticalStretchRatio );
// particle.plasticF = V * mat3::inverse( S ) * mat3::transpose( W ) * particle.elasticF * particle.plasticF;
// particle.elasticF = W * S * mat3::transpose( V );
}
__global__ void updateParticlesFromGrid( Particle *particles, int numParticles, const Grid *grid, const Node *nodes, float timeStep, const ImplicitCollider *colliders, int numColliders )
{
int particleIdx = threadIdx.x + blockIdx.x * blockDim.x;
if ( particleIdx >= numParticles ) return;
Particle &particle = particles[particleIdx];
// Update particle velocities and fill in velocity gradient for deformation gradient computation
mat3 velocityGradient = mat3( 0.f );
processGridVelocities( particle, grid, nodes, velocityGradient );
updateParticleDeformationGradients( particle, velocityGradient, timeStep );
checkForAndHandleCollisions( colliders, numColliders, particle.position, particle.velocity );
particle.position += timeStep * ( particle.velocity );
}
__global__ void updateColliderPositions(ImplicitCollider *colliders, int numColliders,float timestep)
{
int colliderIdx = blockDim.x*blockIdx.x + threadIdx.x;
colliders[colliderIdx].center += colliders[colliderIdx].velocity*timestep;
}
/**
 * Host driver for one simulation step (HIP build).
 * Clears per-step node and particle-cache buffers, advances colliders,
 * rasterizes particle stress/mass/velocity to the grid (P2G), updates node
 * velocities (explicitly, or implicitly via integrateNodeForces when
 * implicitUpdate is set), then transfers back to particles (G2P).
 * hostParticleCache is the host-side mirror holding the device pointers that
 * the memsets below clear; devParticleCache is the device-side struct passed
 * to the kernels.
 */
__host__ void updateParticles( Particle *particles, ParticleCache *devParticleCache, ParticleCache *hostParticleCache, int numParticles,
Grid *grid, Node *nodes, NodeCache *nodeCaches, int numNodes,
ImplicitCollider *colliders, int numColliders,
float timeStep, bool implicitUpdate )
{
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// Clear data before update
checkCudaErrors( hipMemset(nodes, 0, numNodes*sizeof(Node)) );
checkCudaErrors( hipMemset(nodeCaches, 0, numNodes*sizeof(NodeCache)) );
// Clear the ParticleCache scratch arrays; error-checked for consistency with the node memsets above
checkCudaErrors( hipMemset( hostParticleCache->sigmas, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( hipMemset( hostParticleCache->Aps, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( hipMemset( hostParticleCache->FeHats, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( hipMemset( hostParticleCache->ReHats, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( hipMemset( hostParticleCache->SeHats, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( hipMemset( hostParticleCache->dFs, 0, numParticles*sizeof(mat3) ) );
// 1D configs index particles/nodes; the 2D config gives each particle 64 y-threads,
// one per node in its 4x4x4 support neighborhood
const dim3 pBlocks1D( (numParticles+THREAD_COUNT-1)/THREAD_COUNT );
const dim3 nBlocks1D( (numNodes+THREAD_COUNT-1)/THREAD_COUNT );
const dim3 threads1D( THREAD_COUNT );
const dim3 pBlocks2D( (numParticles+THREAD_COUNT-1)/THREAD_COUNT, 64 );
const dim3 threads2D( THREAD_COUNT/64, 64 );
LAUNCH(hipLaunchKernelGGL(( updateColliderPositions), dim3(numColliders),dim3(1), 0, 0, colliders,numColliders,timeStep) );
LAUNCH(hipLaunchKernelGGL(( computeSigma), dim3(pBlocks1D),dim3(threads1D), 0, 0, particles,devParticleCache,numParticles,grid) );
LAUNCH(hipLaunchKernelGGL(( computeCellMassVelocityAndForceFast), dim3(pBlocks2D),dim3(threads2D), 0, 0, particles,devParticleCache,numParticles,grid,nodes) );
// Explicit velocity update; velocityChange is only finalized here when no implicit solve follows
LAUNCH(hipLaunchKernelGGL(( updateNodeVelocities), dim3(nBlocks1D),dim3(threads1D), 0, 0, nodes,numNodes,timeStep,colliders,numColliders,grid,!implicitUpdate) );
if ( implicitUpdate ) integrateNodeForces( particles, devParticleCache, numParticles, grid, nodes, nodeCaches, numNodes, timeStep );
LAUNCH(hipLaunchKernelGGL(( updateParticlesFromGrid), dim3(pBlocks1D),dim3(threads1D), 0, 0, particles,numParticles,grid,nodes,timeStep,colliders,numColliders) );
}
| 3b3a5e34e7d859f6922e8fa8bba12806ad4939de.cu | /**************************************************************************
**
** SNOW - CS224 BROWN UNIVERSITY
**
** simulation.cu
** Authors: evjang, mliberma, taparson, wyegelwe
** Created: 17 Apr 2014
**
**************************************************************************/
#define CUDA_INCLUDE
#include <cuda.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "math.h"
#include "sim/caches.h"
#include "sim/implicitcollider.h"
#include "sim/material.h"
#include "sim/particle.h"
#include "sim/particlegridnode.h"
#include "common/math.h"
#include "cuda/helpers.h"
#include "cuda/atomic.h"
#include "cuda/collider.h"
#include "cuda/decomposition.h"
#include "cuda/implicit.h"
#include "cuda/weighting.h"
#include "cuda/functions.h"
#define ALPHA 0.05f
#define GRAVITY vec3(0.f,-9.8f,0.f)
// Chain to compute the volume of the particle
/**
* Part of one time operation to compute particle volumes. First rasterize particle masses to grid
*
* Operation done over Particles over grid node particle affects
*/
__global__ void computeNodeMasses( const Particle *particles, int numParticles, const Grid *grid, float *nodeMasses )
{
// 2D launch: x-dimension threads index the particle; threadIdx.y selects one of
// the 4x4x4 = 64 grid nodes in the particle's support neighborhood
int particleIdx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
const Particle &particle = particles[particleIdx];
glm::ivec3 currIJK;
// Decode threadIdx.y into an (i,j,k) offset inside the 4x4x4 neighborhood
Grid::gridIndexToIJK( threadIdx.y, glm::ivec3(4,4,4), currIJK );
// Particle position in grid-cell units
vec3 particleGridPos = (particle.position - grid->pos) / grid->h;
currIJK += glm::ivec3(particleGridPos-1);
if ( Grid::withinBoundsInclusive(currIJK, glm::ivec3(0,0,0), grid->dim) ) {
vec3 nodePosition(currIJK);
vec3 dx = vec3::abs( particleGridPos - nodePosition );
float w = weight( dx );
// Many particles (and y-threads) can hit the same node: accumulate atomically
atomicAdd( &nodeMasses[Grid::getGridIndex(currIJK, grid->dim+1)], particle.mass*w );
}
}
/**
* Computes the particle's density * grid's volume. This needs to be separate from computeCellMasses(...) because
* we need to wait for ALL threads to sync before computing the density
*
* Operation done over Particles over grid node particle affects
*/
__global__ void computeParticleDensity( Particle *particles, int numParticles, const Grid *grid, const float *cellMasses )
{
// Same 2D launch layout as computeNodeMasses: x threads pick the particle,
// threadIdx.y picks a node in the 4x4x4 neighborhood
int particleIdx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
Particle &particle = particles[particleIdx];
glm::ivec3 currIJK;
Grid::gridIndexToIJK( threadIdx.y, glm::ivec3(4,4,4), currIJK );
vec3 particleGridPos = ( particle.position - grid->pos ) / grid->h;
currIJK += glm::ivec3(particleGridPos-1);
if ( Grid::withinBoundsInclusive(currIJK, glm::ivec3(0,0,0), grid->dim) ) {
vec3 nodePosition(currIJK);
vec3 dx = vec3::abs( particleGridPos - nodePosition );
float w = weight( dx );
float gridVolume = grid->h * grid->h * grid->h;
// Accumulate weighted node mass / cell volume into particle.volume; at this
// stage particle.volume temporarily holds density (inverted by computeParticleVolume)
atomicAdd( &particle.volume, cellMasses[Grid::getGridIndex(currIJK, grid->dim+1)] * w / gridVolume ); //fill volume with particle density. Then in final step, compute volume
}
}
/**
* Computes the particle's volume. Assumes computeParticleDensity(...) has just been called.
*
* Operation done over particles
*/
/**
 * Final step of the one-time volume initialization: converts the density that
 * computeParticleDensity accumulated into particle.volume into an actual
 * volume via volume = mass / density. One thread per particle.
 */
__global__ void computeParticleVolume( Particle *particleData, int numParticles )
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if ( tid < numParticles ) {
        Particle &p = particleData[tid];
        // p.volume currently holds the particle's density, not its volume
        p.volume = p.mass / p.volume;
    }
}
/**
 * One-time host setup estimating each particle's rest volume: rasterize
 * particle masses onto the grid, derive a per-particle density from the node
 * masses, then invert density into volume. The temporary node-mass buffer
 * lives only for the duration of this call.
 */
__host__ void initializeParticleVolumes( Particle *particles, int numParticles, const Grid *grid, int numNodes )
{
float *devNodeMasses;
checkCudaErrors( cudaMalloc( (void**)&devNodeMasses, numNodes*sizeof(float) ) );
// Error-checked for consistency with the surrounding CUDA calls
checkCudaErrors( cudaMemset( devNodeMasses, 0, numNodes*sizeof(float) ) );
// 2D config: one x-thread group per particle, 64 y-threads for the 4x4x4 neighborhood
const dim3 blocks( (numParticles+THREAD_COUNT-1)/THREAD_COUNT, 64 );
static const dim3 threads( THREAD_COUNT / 64, 64 );
LAUNCH( computeNodeMasses<<<blocks,threads>>>(particles,numParticles,grid,devNodeMasses) );
LAUNCH( computeParticleDensity<<<blocks,threads>>>(particles,numParticles,grid,devNodeMasses) );
LAUNCH( computeParticleVolume<<<(numParticles+THREAD_COUNT-1)/THREAD_COUNT,THREAD_COUNT>>>(particles,numParticles) );
checkCudaErrors( cudaFree(devNodeMasses) );
}
// Per-particle stress precomputation, one thread per particle: caches each
// particle's volume-scaled (negated) stress term for the P2G force rasterization.
__global__ void computeSigma( const Particle *particles, ParticleCache *particleCache, int numParticles, const Grid *grid )
{
int particleIdx = blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
const Particle &particle = particles[particleIdx];
const mat3 &Fp = particle.plasticF; //for the sake of making the code look like the math
const mat3 &Fe = particle.elasticF;
float Jpp = mat3::determinant(Fp);
float Jep = mat3::determinant(Fe);
mat3 Re;
// Re appears in the (Fe - Re) corotated term below; presumably the rotation
// factor of Fe's polar decomposition -- confirm against computePD
computePD( Fe, Re );
const Material material = particle.material;
// Hardening: plastic compression (Jpp < 1) stiffens the Lame parameters exponentially
float muFp = material.mu*expf(material.xi*(1-Jpp));
float lambdaFp = material.lambda*expf(material.xi*(1-Jpp));
// Negated and pre-scaled by particle volume so the P2G pass can accumulate force directly
particleCache->sigmas[particleIdx] = (2*muFp*mat3::multiplyABt(Fe-Re, Fe) + mat3(lambdaFp*(Jep-1)*Jep)) * -particle.volume;
}
/**
* Called on each particle.
*
* Each particle adds it's mass, velocity and force contribution to the grid nodes within 2h of itself.
*
* In:
* particleData -- list of particles
* grid -- Stores grid paramters
* worldParams -- Global parameters dealing with the physics of the world
*
* Out:
* nodes -- list of every node in grid ((dim.x+1)*(dim.y+1)*(dim.z+1))
*
*/
__global__ void computeCellMassVelocityAndForceFast( const Particle *particleData, const ParticleCache *particleCache, int numParticles, const Grid *grid, Node *nodes )
{
// 2D launch: x threads index the particle, threadIdx.y (0..63) indexes one node
// of the particle's 4x4x4 support neighborhood
int particleIdx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if ( particleIdx >= numParticles ) return;
const Particle &particle = particleData[particleIdx];
glm::ivec3 currIJK;
Grid::gridIndexToIJK(threadIdx.y, glm::ivec3(4,4,4), currIJK);
vec3 particleGridPos = (particle.position-grid->pos)/grid->h;
currIJK += glm::ivec3( particleGridPos-1 );
if ( Grid::withinBoundsInclusive(currIJK, glm::ivec3(0,0,0), grid->dim) ) {
Node &node = nodes[Grid::getGridIndex(currIJK, grid->dim+1)];
float w;
vec3 wg;
nodePosition(currIJK.x, currIJK.y, currIJK.z);
weightAndGradient( particleGridPos - nodePosition, w, wg );
// P2G scatter: multiple particles touch the same node, so all three
// accumulations must be atomic. node.velocity holds momentum here; it is
// normalized by mass later in updateNodeVelocities.
atomicAdd( &node.mass, particle.mass*w );
atomicAdd( &node.velocity, particle.velocity*particle.mass*w );
// Force contribution uses the stress cached by computeSigma (already volume-scaled and negated)
atomicAdd( &node.force, particleCache->sigmas[particleIdx]*wg );
}
}
/**
* Called on each grid node.
*
* Updates the velocities of each grid node based on forces and collisions
*
* In:
* nodes -- list of all nodes in the grid.
* dt -- delta time, time step of simulation
* colliders -- array of colliders in the scene.
* numColliders -- number of colliders in the scene
* worldParams -- Global parameters dealing with the physics of the world
* grid -- parameters defining the grid
*
* Out:
* nodes -- updated velocity and velocityChange
*
*/
__global__ void updateNodeVelocities( Node *nodes, int numNodes, float dt, const ImplicitCollider* colliders, int numColliders, const Grid *grid, bool updateVelocityChange )
{
int nodeIdx = blockIdx.x*blockDim.x + threadIdx.x;
if ( nodeIdx >= numNodes ) return;
Node &node = nodes[nodeIdx];
// Nodes untouched by any particle keep zero mass/velocity and are skipped
if ( node.mass > 0.f ) {
// Have to normalize velocity by mass to conserve momentum
float scale = 1.f / node.mass;
node.velocity *= scale;
// Initialize velocityChange with pre-update velocity
node.velocityChange = node.velocity;
// Gravity for node forces
node.force += node.mass * GRAVITY;
// Update velocity with node force
node.velocity += dt * scale * node.force;
// Handle collisions
int gridI, gridJ, gridK;
Grid::gridIndexToIJK( nodeIdx, gridI, gridJ, gridK, grid->dim+1 );
// Convert node index to world position for the collider test
vec3 nodePosition = vec3(gridI, gridJ, gridK)*grid->h + grid->pos;
checkForAndHandleCollisions( colliders, numColliders, nodePosition, node.velocity );
// When an implicit solve follows, velocityChange is finalized there instead
if ( updateVelocityChange ) node.velocityChange = node.velocity - node.velocityChange;
}
}
// Use weighting functions to compute particle velocity gradient and update particle velocity
// Gathers grid node velocities around one particle: accumulates the particle's
// velocity gradient and blends PIC/FLIP velocities into particle.velocity.
// Only reads `nodes`; writes particle.velocity and velocityGradient (in/out).
__device__ void processGridVelocities( Particle &particle, const Grid *grid, const Node *nodes, mat3 &velocityGradient )
{
const vec3 &pos = particle.position;
const glm::ivec3 &dim = grid->dim;
const float h = grid->h;
// Compute neighborhood of particle in grid
vec3 particleGridPos = (pos - grid->pos) / h,
gridMax = vec3::floor( particleGridPos + vec3(2,2,2) ),
gridMin = vec3::ceil( particleGridPos - vec3(2,2,2) );
glm::ivec3 maxIndex = glm::clamp( glm::ivec3(gridMax), glm::ivec3(0,0,0), dim ),
minIndex = glm::clamp( glm::ivec3(gridMin), glm::ivec3(0,0,0), dim );
// For computing particle velocity gradient:
// grad(v_p) = sum( v_i * transpose(grad(w_ip)) ) = [3x3 matrix]
// For updating particle velocity:
// v_PIC = sum( v_i * w_ip )
// v_FLIP = v_p + sum( dv_i * w_ip )
// v = (1-alpha)*v_PIC + alpha*v_FLIP
vec3 v_PIC(0,0,0), dv_FLIP(0,0,0);
// Linearized node indexing: z is the fastest-varying dimension
int rowSize = dim.z+1;
int pageSize = (dim.y+1)*rowSize;
for ( int i = minIndex.x; i <= maxIndex.x; ++i ) {
// d holds |distance| per axis, s the corresponding sign, as expected by weightAndGradient
vec3 d, s;
d.x = particleGridPos.x - i;
d.x *= ( s.x = ( d.x < 0 ) ? -1.f : 1.f );
int pageOffset = i*pageSize;
for ( int j = minIndex.y; j <= maxIndex.y; ++j ) {
d.y = particleGridPos.y - j;
d.y *= ( s.y = ( d.y < 0 ) ? -1.f : 1.f );
int rowOffset = pageOffset + j*rowSize;
for ( int k = minIndex.z; k <= maxIndex.z; ++k ) {
d.z = particleGridPos.z - k;
d.z *= ( s.z = ( d.z < 0 ) ? -1.f : 1.f );
const Node &node = nodes[rowOffset+k];
float w;
vec3 wg;
weightAndGradient( s, d, w, wg );
velocityGradient += mat3::outerProduct( node.velocity, wg );
// Particle velocities
v_PIC += node.velocity * w;
dv_FLIP += node.velocityChange * w;
}
}
}
// ALPHA blends stable PIC against less-dissipative FLIP
particle.velocity = (1.f-ALPHA)*v_PIC + ALPHA*(particle.velocity+dv_FLIP);
}
// Advances the particle's elastic/plastic deformation gradients for one step:
// applies the velocity gradient to Fe, then clamps Fe's singular values to the
// material's critical compression/stretch ratios, pushing the excess into Fp.
__device__ void updateParticleDeformationGradients( Particle &particle, const mat3 &velocityGradient, float timeStep )
{
// Temporarily assign all deformation to elastic portion
particle.elasticF = mat3::addIdentity( timeStep*velocityGradient ) * particle.elasticF;
const Material &material = particle.material;
// Clamp the singular values
mat3 W, S, Sinv, V;
computeSVD( particle.elasticF, W, S, V );
// FAST COMPUTATION:
// S is diagonal, so only entries [0], [4], [8] matter; clamp each and invert in place
S = mat3( CLAMP( S[0], material.criticalCompressionRatio, material.criticalStretchRatio ), 0.f, 0.f,
0.f, CLAMP( S[4], material.criticalCompressionRatio, material.criticalStretchRatio ), 0.f,
0.f, 0.f, CLAMP( S[8], material.criticalCompressionRatio, material.criticalStretchRatio ) );
Sinv = mat3( 1.f/S[0], 0.f, 0.f,
0.f, 1.f/S[4], 0.f,
0.f, 0.f, 1.f/S[8] );
// Fp must be updated before Fe is overwritten: it uses the pre-clamp elasticF
particle.plasticF = mat3::multiplyADBt( V, Sinv, W ) * particle.elasticF * particle.plasticF;
particle.elasticF = mat3::multiplyADBt( W, S, V );
// // MORE ACCURATE COMPUTATION:
// S[0] = CLAMP( S[0], material->criticalCompressionRatio, material->criticalStretchRatio );
// S[4] = CLAMP( S[4], material->criticalCompressionRatio, material->criticalStretchRatio );
// S[8] = CLAMP( S[8], material->criticalCompressionRatio, material->criticalStretchRatio );
// particle.plasticF = V * mat3::inverse( S ) * mat3::transpose( W ) * particle.elasticF * particle.plasticF;
// particle.elasticF = W * S * mat3::transpose( V );
}
/**
 * Grid-to-particle transfer kernel, one thread per particle: gathers node
 * velocities to update the particle velocity and its velocity gradient,
 * advances the deformation gradients, resolves collider contact, and advects
 * the particle position.
 */
__global__ void updateParticlesFromGrid( Particle *particles, int numParticles, const Grid *grid, const Node *nodes, float timeStep, const ImplicitCollider *colliders, int numColliders )
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid >= numParticles ) return;
    Particle &p = particles[tid];
    // Gather velocity and velocity gradient from the surrounding nodes
    mat3 gradV( 0.f );
    processGridVelocities( p, grid, nodes, gradV );
    updateParticleDeformationGradients( p, gradV, timeStep );
    // Resolve collisions before advecting so the corrected velocity is integrated
    checkForAndHandleCollisions( colliders, numColliders, p.position, p.velocity );
    p.position += timeStep * p.velocity;
}
// Advances each collider's center by its velocity over one time step (explicit Euler).
// One thread per collider; the bounds guard makes the kernel safe for any launch
// configuration whose total thread count exceeds numColliders.
__global__ void updateColliderPositions(ImplicitCollider *colliders, int numColliders,float timestep)
{
int colliderIdx = blockDim.x*blockIdx.x + threadIdx.x;
if ( colliderIdx >= numColliders ) return; // guard: original wrote out of bounds for oversized launches
colliders[colliderIdx].center += colliders[colliderIdx].velocity*timestep;
}
/**
 * Host driver for one simulation step (CUDA build).
 * Clears per-step node and particle-cache buffers, advances colliders,
 * rasterizes particle stress/mass/velocity to the grid (P2G), updates node
 * velocities (explicitly, or implicitly via integrateNodeForces when
 * implicitUpdate is set), then transfers back to particles (G2P).
 * hostParticleCache is the host-side mirror holding the device pointers that
 * the memsets below clear; devParticleCache is the device-side struct passed
 * to the kernels.
 */
__host__ void updateParticles( Particle *particles, ParticleCache *devParticleCache, ParticleCache *hostParticleCache, int numParticles,
Grid *grid, Node *nodes, NodeCache *nodeCaches, int numNodes,
ImplicitCollider *colliders, int numColliders,
float timeStep, bool implicitUpdate )
{
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// Clear data before update
checkCudaErrors( cudaMemset(nodes, 0, numNodes*sizeof(Node)) );
checkCudaErrors( cudaMemset(nodeCaches, 0, numNodes*sizeof(NodeCache)) );
// Clear the ParticleCache scratch arrays; error-checked for consistency with the node memsets above
checkCudaErrors( cudaMemset( hostParticleCache->sigmas, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( cudaMemset( hostParticleCache->Aps, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( cudaMemset( hostParticleCache->FeHats, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( cudaMemset( hostParticleCache->ReHats, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( cudaMemset( hostParticleCache->SeHats, 0, numParticles*sizeof(mat3) ) );
checkCudaErrors( cudaMemset( hostParticleCache->dFs, 0, numParticles*sizeof(mat3) ) );
// 1D configs index particles/nodes; the 2D config gives each particle 64 y-threads,
// one per node in its 4x4x4 support neighborhood
const dim3 pBlocks1D( (numParticles+THREAD_COUNT-1)/THREAD_COUNT );
const dim3 nBlocks1D( (numNodes+THREAD_COUNT-1)/THREAD_COUNT );
const dim3 threads1D( THREAD_COUNT );
const dim3 pBlocks2D( (numParticles+THREAD_COUNT-1)/THREAD_COUNT, 64 );
const dim3 threads2D( THREAD_COUNT/64, 64 );
LAUNCH( updateColliderPositions<<<numColliders,1>>>(colliders,numColliders,timeStep) );
LAUNCH( computeSigma<<<pBlocks1D,threads1D>>>(particles,devParticleCache,numParticles,grid) );
LAUNCH( computeCellMassVelocityAndForceFast<<<pBlocks2D,threads2D>>>(particles,devParticleCache,numParticles,grid,nodes) );
// Explicit velocity update; velocityChange is only finalized here when no implicit solve follows
LAUNCH( updateNodeVelocities<<<nBlocks1D,threads1D>>>(nodes,numNodes,timeStep,colliders,numColliders,grid,!implicitUpdate) );
if ( implicitUpdate ) integrateNodeForces( particles, devParticleCache, numParticles, grid, nodes, nodeCaches, numNodes, timeStep );
LAUNCH( updateParticlesFromGrid<<<pBlocks1D,threads1D>>>(particles,numParticles,grid,nodes,timeStep,colliders,numColliders) );
}
|
7929fa92c6b054c0e6897c570d5070a096ee5298.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <thrust/sequence.h>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/hashmap/HashBackendBuffer.h"
namespace open3d {
namespace core {
// Resets the hash backend's heap buffer on the device: fills it with the
// identity sequence 0, 1, ..., N-1 so every slot index is available again.
void CUDAResetHeap(Tensor &heap) {
uint32_t *heap_ptr = heap.GetDataPtr<uint32_t>();
// thrust::sequence runs directly on the device; no host round-trip needed
thrust::sequence(thrust::device, heap_ptr, heap_ptr + heap.GetLength(), 0);
OPEN3D_CUDA_CHECK(hipGetLastError());
}
} // namespace core
} // namespace open3d
| 7929fa92c6b054c0e6897c570d5070a096ee5298.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <thrust/sequence.h>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/hashmap/HashBackendBuffer.h"
namespace open3d {
namespace core {
// Resets the hash backend's heap buffer on the device by writing the identity
// sequence 0, 1, ..., N-1, making every buffer slot available again.
void CUDAResetHeap(Tensor &heap) {
    uint32_t *ptr = heap.GetDataPtr<uint32_t>();
    const auto length = heap.GetLength();
    thrust::sequence(thrust::device, ptr, ptr + length, 0);
    OPEN3D_CUDA_CHECK(cudaGetLastError());
}
} // namespace core
} // namespace open3d
|
ef6b8068bab64f7db8e72f0c7acac4e52569007a.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cudpp_hash.h>
#include <cuda_util.h>
#include <hip/hip_runtime_api.h>
// input: nsample (1), true_mat (b, m, n)
// output: idx (b, m, nsample)
// input: nsample (1), true_mat (b, m, n)
// output: idx (b, m, nsample)
// For each centroid j, collects up to nsample point indices i with
// true_mat[j][i] set, then pads the remainder by repeating the first hit.
// Fix: the collection loop now stops at nsample hits; the original kept
// writing past idx[j*nsample + nsample - 1] when a row had more than nsample
// true entries (out-of-bounds device write).
__global__ void assign_idx_gpu(int b, int n, int m, int nsample, const bool *true_mat, int *idx){
int batch_index = blockIdx.x;
true_mat += n*m*batch_index;
idx += nsample*m*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
//j: the jth centroids within the batch
for (int j = index; j < m; j += stride){
int cnt = 0;
for(int i = 0; i < n && cnt < nsample; i++){
if(true_mat[j*n + i]){
idx[j*nsample + cnt] = i;
cnt++;
}
}
// NOTE(review): when a row has no true entries the padding replicates
// whatever idx[j*nsample] already holds (matches the original behavior).
for(; cnt < nsample; cnt++){
idx[j*nsample + cnt] = idx[j*nsample];
}
}
}
//__global__ void assign_idx_gpu(int b, int m, int nsample, const int *true_idx, const int *end_idx, int *idx){
// int batch_index = blockIdx.x;
// end_idx += m*batch_index;
// idx += m*nsample*batch_index;
//
// int index = threadIdx.x;
// int stride = blockDim.x;
//
// //j: the jth centroids within the batch
// for (int j = index; j < m; j += stride){
// int start_idx_;
// if (j == 0 && batch_index == 0){
// start_idx_ = 0;
// }else{
// start_idx_ = end_idx[j-1];
// }
// int end_idx_ = end_idx[j];
//
// int i = 0;
// for (; i < (end_idx_ - start_idx_); i++){
// idx[nsample * j + i] = true_idx[start_idx_ + i];
// }
// for (; i < nsample; i++){
// idx[nsample * j + i] = true_idx[end_idx_ - 1];
// }
// }
//}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
// One block per batch; threads stride over the m query centroids. For each
// centroid, scans all n points and records the FIRST nsample indices within
// `radius`, padding short results by repeating the first hit.
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
// max(..., 1e-20f) avoids a zero distance for coincident points
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
// if (fabsf(x2-x1)<radius && (y2-y1)<radius && (z2-z1)<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
// Gathers, for every centroid, the c-dim features of its nsample neighbors.
// One block per batch; threads stride over the m centroids.
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
    const int batch = blockIdx.x;
    const float *batchPoints = points + n*c*batch;
    const int *batchIdx = idx + m*nsample*batch;
    float *batchOut = out + m*nsample*c*batch;
    for (int j = threadIdx.x; j < m; j += blockDim.x) {
        for (int k = 0; k < nsample; ++k) {
            const int src = batchIdx[j*nsample + k];
            float *dst = batchOut + (j*nsample + k)*c;
            for (int l = 0; l < c; ++l) {
                dst[l] = batchPoints[src*c + l];
            }
        }
    }
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
// Backward pass of group_point_gpu: scatters each output gradient back to its
// source point. The same point may be gathered by several centroids, so the
// accumulation must be atomic.
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
// grad_points is assumed pre-zeroed by the caller -- TODO confirm
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
// Partial selection sort per row: copies dist into out with identity indices,
// then places the k smallest distances (and their indices) at the front of
// each row of n. Entries beyond position k are left partially ordered.
// One block per batch; threads stride over the m rows.
__global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
int batch_index = blockIdx.x;
dist+=m*n*batch_index;
outi+=m*n*batch_index;
out+=m*n*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// copy from dist to dist_out
for (int j=index;j<m;j+=stride) {
for (int s=0;s<n;++s) {
out[j*n+s] = dist[j*n+s];
outi[j*n+s] = s;
}
}
float *p_dist;
for (int j=index;j<m;j+=stride) {
p_dist = out+j*n;
// selection sort for the first k elements
for (int s=0;s<k;++s) {
int min=s;
// find the min
for (int t=s+1;t<n;++t) {
if (p_dist[t]<p_dist[min]) {
min = t;
}
}
// swap min-th and i-th element
if (min!=s) {
float tmp = p_dist[min];
p_dist[min] = p_dist[s];
p_dist[s] = tmp;
int tmpi = outi[j*n+min];
outi[j*n+min] = outi[j*n+s];
outi[j*n+s] = tmpi;
}
}
}
}
//compose_insert_items<<<b,256>>>(b, n, grid_size, all_xyz, limits, sizes, d_keys, d_vals);
// Builds (key, value) pairs for hash-table insertion: each point's key encodes
// its (x,y,z) grid cell (shifted by +1 so a centroid's 3x3x3 neighborhood query
// with offsets 0..2 stays non-negative at the domain boundary); the value is
// the point's index within its batch. One block per batch.
// NOTE(review): the outer `index < n` guard is redundant with the stride loop
// but harmless; `limits` appears to hold per-axis minimums at [0],[2],[4] --
// confirm against the caller.
__global__ void compose_insert_items(int b, int n, float grid_size, const float *all_xyz, const float *limits, const int *sizes, unsigned int *d_keys, unsigned int *d_vals){
int index = threadIdx.x;
if(index < n){
int batch_index = blockIdx.x;
all_xyz += batch_index * n * 3;
unsigned int *tmp_d_keys = d_keys + batch_index * n;
unsigned int *tmp_d_vals = d_vals + batch_index * n;
int stride = blockDim.x;
for(int point_idx = index; point_idx < n; point_idx += stride){
unsigned int x_idx = __float2uint_rd((all_xyz[point_idx*3] - limits[0]) / grid_size) + 1;
unsigned int y_idx = __float2uint_rd((all_xyz[point_idx*3+1] - limits[2]) / grid_size) + 1;
unsigned int z_idx = __float2uint_rd((all_xyz[point_idx*3+2] - limits[4]) / grid_size) + 1;
// Linearize (batch, x, y, z) into a single unsigned key
tmp_d_keys[point_idx] = z_idx + sizes[2] * (y_idx + sizes[1] * (x_idx + batch_index * sizes[0]));
tmp_d_vals[point_idx] = point_idx;
// printf("b:%d, point_idx:%d, x:%f, y:%f, z:%f, x_idx:%u, y_idx:%u, z_idx:%u, key: %u, val: %u\n", batch_index, point_idx, all_xyz[point_idx*3], all_xyz[point_idx*3+1], all_xyz[point_idx*3+2], x_idx, y_idx, z_idx, tmp_d_keys[point_idx], tmp_d_vals[point_idx]);
}
}
}
//compose_queries<<<b,256>>>(b, m, grid_size, centroids_xyz, limits, sizes, d_queries);
// Builds, for every centroid, the 27 hash keys of its 3x3x3 grid-cell
// neighborhood (insertion shifted cell indices by +1, so offsets 0..2 here line
// up with the centroid's own cell and its neighbors). One block per batch.
// Fix: threads now stride over all m centroids; the original only handled
// index < blockDim.x, leaving d_queries uninitialized whenever m > 256.
__global__ void compose_queries(int b, int m, float grid_size, const float *centroids_xyz, const float *limits, const int *sizes, unsigned int *d_queries){
int stride = blockDim.x;
int batch_index = blockIdx.x;
centroids_xyz += batch_index * m * 3;
unsigned int *tmp_d_queries = d_queries + batch_index * m * 27;
for(int index = threadIdx.x; index < m; index += stride){
unsigned int x_idx = __float2uint_rd((centroids_xyz[index*3] - limits[0]) / grid_size);
unsigned int y_idx = __float2uint_rd((centroids_xyz[index*3+1] - limits[2]) / grid_size);
unsigned int z_idx = __float2uint_rd((centroids_xyz[index*3+2] - limits[4]) / grid_size);
int cnt = 0;
for(int x_offset = 0; x_offset < 3; x_offset++){
for(int y_offset = 0; y_offset < 3; y_offset++){
for(int z_offset = 0; z_offset < 3; z_offset++){
// Same linearization as compose_insert_items' keys
tmp_d_queries[index*27+cnt] = z_idx + z_offset + sizes[2] * (y_idx + y_offset + sizes[1] * (x_idx + x_offset + batch_index * sizes[0]));
cnt++;
}
}
}
}
}
// hipLaunchKernelGGL(( hash_square_idx_gpu), dim3(b),dim3(256), 0, 0, b, n, m, nsample, d_vals_multivalue, d_sorted_idx, d_all_values, idx, pts_cnt);
// , const unsigned int *sorted_idx
// Walks the 27 multivalue-hash query results for each centroid and fills
// idx (b,m,nsample) with up to nsample neighbor point indices, visiting cells
// center-outward (sorted_idx orders the 3x3x3 neighborhood: center, faces,
// edges, corners). pts_cnt records how many points were found before padding
// with the first hit. One block per batch.
// Fix: threads now stride over all m centroids; the original only processed
// index < blockDim.x, so idx/pts_cnt for m > 256 were never written.
__global__ void hash_square_idx_gpu(int b, int n, int m, int nsample, const uint2 *d_vals_multivalue, const unsigned int * d_all_values, int *idx, int *pts_cnt){
int stride = blockDim.x;
int batch_index = blockIdx.x;
// Cell visit order within the 3x3x3 block: center cell first, then face,
// edge, and corner neighbors
unsigned int sorted_idx[27] = {13, 4,10,12,14,16,22, 1,3,5,7,9,11,15,17,19,21,23,25, 0,2,6,8,18,20,24,26};
idx += batch_index * m * nsample;
pts_cnt += batch_index * m;
for(int index = threadIdx.x; index < m; index += stride){
int query_idx_base = batch_index*m*27+index*27;
int cnt = 0;
for(int i = 0; i < 27; i++){
int query_idx = query_idx_base + sorted_idx[i];
// .y = number of values for this key, .x = offset into d_all_values
unsigned int num_values = d_vals_multivalue[query_idx].y;
if(num_values > 0){
for(unsigned int j = 0; j < num_values && cnt < nsample; j++){
idx[index*nsample + cnt] = d_all_values[d_vals_multivalue[query_idx].x + j];
cnt++;
}
}
}
pts_cnt[index] = cnt;
// Pad short results by repeating the first (closest-cell) hit
for(;cnt < nsample;cnt++){
idx[index*nsample + cnt] = idx[index*nsample];
}
}
}
// Host orchestration of the grid-hash neighborhood query:
// 1) hash every point's grid cell into a CUDPP multivalue hash table,
// 2) compose 27 neighborhood-cell queries per centroid and retrieve them,
// 3) unpack the retrieved value ranges into per-centroid index lists.
// d_keys/d_vals/d_queries/d_vals_multivalue are caller-provided device scratch
// buffers (sized b*n, b*n, b*m*27, b*m*27 respectively -- confirm at call site).
void querySquarePointLauncher(int b, int n, int m, float grid_size, int nsample, const float *all_xyz, const float *centroids_xyz, const float *limits, const int *sizes, int *idx, int *pts_cnt, unsigned int *d_keys, unsigned int *d_vals, unsigned int *d_queries, uint2 *d_vals_multivalue) {
// printf("Start\n");
// Allocate the GPU memory.
// unsigned int *d_keys = NULL, *d_vals = NULL, *d_queries = NULL;
// uint2 *d_vals_multivalue = NULL;
unsigned int kInputSize = b * n;
// printf("b %d, n %d, kInputSize: %u\n", b, n, kInputSize);
// Build (cell-key, point-index) pairs for every point
hipLaunchKernelGGL(( compose_insert_items), dim3(b),dim3(256), 0, 0, b, n, grid_size, all_xyz, limits, sizes, d_keys, d_vals);
hipDeviceSynchronize();
CUDPPHandle theCudpp;
CUDPPResult result = cudppCreate(&theCudpp);
if (result != CUDPP_SUCCESS){
fprintf(stderr, "Error initializing CUDPP Library.\n");
exit(-1);
}
// Multivalue table: one cell key can map to many point indices
CUDPPHashTableConfig config;
config.type = CUDPP_MULTIVALUE_HASH_TABLE;
config.kInputSize = kInputSize;
config.space_usage = 2.0f;
CUDPPHandle hash_table_handle;
result = cudppHashTable(theCudpp, &hash_table_handle, &config);
if (result != CUDPP_SUCCESS){
fprintf(stderr, "Error in cudppHashTable call in"
"testHashTable (make sure your device is at"
"least compute version 2.0\n");
}
result = cudppHashInsert(hash_table_handle, d_keys,
d_vals, kInputSize);
hipDeviceSynchronize();
// printf("insert values\n");
if (result != CUDPP_SUCCESS){
fprintf(stderr, "Error in cudppHashInsert call in"
"testHashTable\n");
}
unsigned int values_size;
if (cudppMultivalueHashGetValuesSize(hash_table_handle,
&values_size) !=
CUDPP_SUCCESS){
fprintf(stderr, "Error: "
"cudppMultivalueHashGetValuesSize()\n");
}
// d_all_values is owned by the hash table; retrieved (offset,count) pairs index into it
unsigned int * d_all_values = NULL;
if (cudppMultivalueHashGetAllValues(hash_table_handle,
&d_all_values) !=
CUDPP_SUCCESS){
fprintf(stderr, "Error: "
"cudppMultivalueHashGetAllValues()\n");
}
// 27 neighborhood-cell keys per centroid
hipLaunchKernelGGL(( compose_queries), dim3(b),dim3(256), 0, 0, b, m, grid_size, centroids_xyz, limits, sizes, d_queries);
hipDeviceSynchronize();
result = cudppHashRetrieve(hash_table_handle,
d_queries,
d_vals_multivalue,
b * m * 27);
hipDeviceSynchronize();
// printf("retrieved values\n");
if (result != CUDPP_SUCCESS){
fprintf(stderr, "Error in cudppHashRetrieve call\n");
}
// Unpack value ranges into up-to-nsample indices per centroid
hipLaunchKernelGGL(( hash_square_idx_gpu), dim3(b),dim3(256), 0, 0, b, n, m, nsample, d_vals_multivalue, d_all_values, idx, pts_cnt);
hipDeviceSynchronize();
// printf("obtain idx\n");
/// -------------------------------------------- Free the table.
result = cudppDestroyHashTable(theCudpp, hash_table_handle);
if (result != CUDPP_SUCCESS){
fprintf(stderr, "Error in cudppDestroyHashTable call in"
"testHashTable\n");
}
// CUDA_SAFE_CALL(hipFree(d_keys));
// CUDA_SAFE_CALL(hipFree(d_vals));
// CUDA_SAFE_CALL(hipFree(d_queries));
// CUDA_SAFE_CALL(hipFree(d_vals_multivalue));
result = cudppDestroy(theCudpp);
if (result != CUDPP_SUCCESS){
printf("Error shutting down CUDPP Library.\n");
}
// printf("Ends\n");
}
// Host wrapper: launches assign_idx_gpu with one block per batch, 256 threads per block.
void assignIdxLauncher(int b, int n, int m, int nsample, const bool *true_mat, int *idx){
    const dim3 grid(b);
    const dim3 block(256);
    hipLaunchKernelGGL(( assign_idx_gpu), grid, block, 0, 0, b,n,m,nsample,true_mat,idx);
}
// Host wrapper for the radius ball query: one block per batch, 256 threads per block.
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    const dim3 grid(b);
    const dim3 block(256);
    hipLaunchKernelGGL(( query_ball_point_gpu), grid, block, 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
}
// Host wrapper for the partial selection sort: one block per batch, 256 threads per block.
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
    const dim3 grid(b);
    const dim3 block(256);
    hipLaunchKernelGGL(( selection_sort_gpu), grid, block, 0, 0, b,n,m,k,dist,outi,out);
}
// Host wrapper for the feature gather: one block per batch, 256 threads per block.
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
    const dim3 grid(b);
    const dim3 block(256);
    hipLaunchKernelGGL(( group_point_gpu), grid, block, 0, 0, b,n,c,m,nsample,points,idx,out);
}
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){
hipLaunchKernelGGL(( group_point_grad_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points);
//group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
//hipDeviceSynchronize();
}
| ef6b8068bab64f7db8e72f0c7acac4e52569007a.cu | #include <cstdio>
#include <cudpp_hash.h>
#include <cuda_util.h>
#include <cuda_runtime_api.h>
// input: nsample (1), true_mat (b, m, n)
// output: idx (b, m, nsample)
// One block per batch element; threads stride over the m centroids.
// For each centroid j, collects the indices of points flagged in true_mat,
// capped at nsample, then pads the remaining slots with the first hit.
__global__ void assign_idx_gpu(int b, int n, int m, int nsample, const bool *true_mat, int *idx){
int batch_index = blockIdx.x;
true_mat += n*m*batch_index;
idx += nsample*m*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// j: the jth centroid within the batch
for (int j = index; j < m; j += stride){
int cnt = 0;
// Stop once nsample indices were collected: without the cnt bound the
// loop kept writing past idx[j*nsample + nsample - 1] and clobbered the
// slots of centroid j+1 whenever more than nsample points were flagged.
for(int i = 0; i < n && cnt < nsample; i++){
if(true_mat[j*n + i]){
idx[j*nsample + cnt] = i;
cnt++;
}
}
// Pad unused slots by repeating the first hit. NOTE(review): if no point
// was flagged for this centroid, this replicates an uninitialized value
// (same as the original behaviour).
for(; cnt < nsample; cnt++){
idx[j*nsample + cnt] = idx[j*nsample];
}
}
}
//__global__ void assign_idx_gpu(int b, int m, int nsample, const int *true_idx, const int *end_idx, int *idx){
// int batch_index = blockIdx.x;
// end_idx += m*batch_index;
// idx += m*nsample*batch_index;
//
// int index = threadIdx.x;
// int stride = blockDim.x;
//
// //j: the jth centroids within the batch
// for (int j = index; j < m; j += stride){
// int start_idx_;
// if (j == 0 && batch_index == 0){
// start_idx_ = 0;
// }else{
// start_idx_ = end_idx[j-1];
// }
// int end_idx_ = end_idx[j];
//
// int i = 0;
// for (; i < (end_idx_ - start_idx_); i++){
// idx[nsample * j + i] = true_idx[start_idx_ + i];
// }
// for (; i < nsample; i++){
// idx[nsample * j + i] = true_idx[end_idx_ - 1];
// }
// }
//}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
// One block per batch element; threads stride over the m query centroids.
// Picks the FIRST nsample points of xyz1 inside each ball of the given
// radius; pts_cnt records how many unique points were found.
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
// The centroid coordinates are loop-invariant; load them once instead
// of re-reading global memory on every candidate point.
const float x2=xyz2[j*3+0];
const float y2=xyz2[j*3+1];
const float z2=xyz2[j*3+2];
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
// distance is clamped away from zero before the radius comparison
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
// Gathers, for each of the m groups, the c-dimensional features of its
// nsample selected points. One block per batch element; threads stride
// over the m groups.
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k]; // source point index within this batch
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
// Scatter-add of grouped gradients back to the per-point gradient buffer.
// atomicAdd is required because the same point index can appear in several
// groups. Assumes grad_points was zero-initialized by the caller -- TODO
// confirm against call sites.
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
// One block per batch; threads stride over the m rows. Each row of dist is
// first copied into out (with identity indices in outi), then a partial
// selection sort places the k smallest values in the first k slots, with
// outi permuted in lockstep; entries beyond k are left partially ordered.
__global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
int batch_index = blockIdx.x;
dist+=m*n*batch_index;
outi+=m*n*batch_index;
out+=m*n*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// copy from dist to dist_out
for (int j=index;j<m;j+=stride) {
for (int s=0;s<n;++s) {
out[j*n+s] = dist[j*n+s];
outi[j*n+s] = s;
}
}
float *p_dist;
for (int j=index;j<m;j+=stride) {
p_dist = out+j*n;
// selection sort for the first k elements
for (int s=0;s<k;++s) {
int min=s;
// find the min
for (int t=s+1;t<n;++t) {
if (p_dist[t]<p_dist[min]) {
min = t;
}
}
// swap min-th and i-th element
if (min!=s) {
float tmp = p_dist[min];
p_dist[min] = p_dist[s];
p_dist[s] = tmp;
int tmpi = outi[j*n+min];
outi[j*n+min] = outi[j*n+s];
outi[j*n+s] = tmpi;
}
}
}
}
//compose_insert_items<<<b,256>>>(b, n, grid_size, all_xyz, limits, sizes, d_keys, d_vals);
// Builds the (key, value) pairs inserted into the CUDPP hash table: for each
// point, key = linearized grid-cell coordinate (batch-offset via sizes[0]),
// value = the point's index within its batch. One block per batch element;
// threads stride over the n points.
__global__ void compose_insert_items(int b, int n, float grid_size, const float *all_xyz, const float *limits, const int *sizes, unsigned int *d_keys, unsigned int *d_vals){
int index = threadIdx.x;
if(index < n){
int batch_index = blockIdx.x;
all_xyz += batch_index * n * 3;
unsigned int *tmp_d_keys = d_keys + batch_index * n;
unsigned int *tmp_d_vals = d_vals + batch_index * n;
int stride = blockDim.x;
for(int point_idx = index; point_idx < n; point_idx += stride){
// +1 shifts cell coordinates -- presumably so compose_queries'
// unshifted 3x3x3 window (offsets 0..2) never underflows at the
// boundary; verify against compose_queries.
unsigned int x_idx = __float2uint_rd((all_xyz[point_idx*3] - limits[0]) / grid_size) + 1;
unsigned int y_idx = __float2uint_rd((all_xyz[point_idx*3+1] - limits[2]) / grid_size) + 1;
unsigned int z_idx = __float2uint_rd((all_xyz[point_idx*3+2] - limits[4]) / grid_size) + 1;
tmp_d_keys[point_idx] = z_idx + sizes[2] * (y_idx + sizes[1] * (x_idx + batch_index * sizes[0]));
tmp_d_vals[point_idx] = point_idx;
// printf("b:%d, point_idx:%d, x:%f, y:%f, z:%f, x_idx:%u, y_idx:%u, z_idx:%u, key: %u, val: %u\n", batch_index, point_idx, all_xyz[point_idx*3], all_xyz[point_idx*3+1], all_xyz[point_idx*3+2], x_idx, y_idx, z_idx, tmp_d_keys[point_idx], tmp_d_vals[point_idx]);
}
}
}
//compose_queries<<<b,256>>>(b, m, grid_size, centroids_xyz, limits, sizes, d_queries);
// Builds, for every centroid, the 27 hash keys of its 3x3x3 grid-cell
// neighborhood (offsets 0..2 are centered on the +1-shifted cell used by
// compose_insert_items). One block per batch element; threads stride over
// the m centroids -- the original kernel only processed the first
// blockDim.x centroids and silently skipped the rest whenever m > 256.
__global__ void compose_queries(int b, int m, float grid_size, const float *centroids_xyz, const float *limits, const int *sizes, unsigned int *d_queries){
int batch_index = blockIdx.x;
int stride = blockDim.x;
centroids_xyz += batch_index * m * 3;
unsigned int *tmp_d_queries = d_queries + batch_index * m * 27;
for(int index = threadIdx.x; index < m; index += stride){
unsigned int x_idx = __float2uint_rd((centroids_xyz[index*3] - limits[0]) / grid_size);
unsigned int y_idx = __float2uint_rd((centroids_xyz[index*3+1] - limits[2]) / grid_size);
unsigned int z_idx = __float2uint_rd((centroids_xyz[index*3+2] - limits[4]) / grid_size);
int cnt = 0;
for(int x_offset = 0; x_offset < 3; x_offset++){
for(int y_offset = 0; y_offset < 3; y_offset++){
for(int z_offset = 0; z_offset < 3; z_offset++){
tmp_d_queries[index*27+cnt] = z_idx + z_offset + sizes[2] * (y_idx + y_offset + sizes[1] * (x_idx + x_offset + batch_index * sizes[0]));
cnt++;
}
}
}
}
}
// Expands CUDPP multivalue retrieval results into per-centroid neighbor lists.
// input: d_vals_multivalue (b,m,27) -- (start offset, count) pairs into
//        d_all_values for each queried cell; d_all_values -- flat array of
//        point indices owned by the hash table.
// output: idx (b,m,nsample) neighbor indices, pts_cnt (b,m) hit counts.
// One block per batch element; threads stride over the m centroids -- the
// original kernel only processed the first blockDim.x centroids and silently
// skipped the rest whenever m > 256.
__global__ void hash_square_idx_gpu(int b, int n, int m, int nsample, const uint2 *d_vals_multivalue, const unsigned int * d_all_values, int *idx, int *pts_cnt){
int batch_index = blockIdx.x;
int stride = blockDim.x;
// Visit cells center-first (13 is the middle of the 3x3x3 cube), then the
// face, edge and corner neighbors, so closer cells fill the slots first.
unsigned int sorted_idx[27] = {13, 4,10,12,14,16,22, 1,3,5,7,9,11,15,17,19,21,23,25, 0,2,6,8,18,20,24,26};
idx += batch_index * m * nsample;
pts_cnt += batch_index * m;
for(int index = threadIdx.x; index < m; index += stride){
int query_idx_base = batch_index*m*27+index*27;
int cnt = 0;
for(int i = 0; i < 27; i++){
int query_idx = query_idx_base + sorted_idx[i];
unsigned int num_values = d_vals_multivalue[query_idx].y;
if(num_values > 0){
for(unsigned int j = 0; j < num_values && cnt < nsample; j++){
idx[index*nsample + cnt] = d_all_values[d_vals_multivalue[query_idx].x + j];
cnt++;
}
}
}
pts_cnt[index] = cnt;
// Pad remaining slots with the first neighbor. NOTE(review): if cnt == 0
// this replicates an uninitialized value, same as the original behaviour.
for(;cnt < nsample;cnt++){
idx[index*nsample + cnt] = idx[index*nsample];
}
}
}
// Cube-neighborhood query via a CUDPP multivalue hash table.
// Points in all_xyz (b,n,3) are hashed by grid cell; each centroid in
// centroids_xyz (b,m,3) queries its 27 surrounding cells and collects up to
// nsample point indices into idx (b,m,nsample); pts_cnt (b,m) receives the
// hit counts. d_keys/d_vals/d_queries/d_vals_multivalue are caller-provided
// device scratch buffers (presumably sized b*n, b*n, b*m*27, b*m*27 -- TODO
// confirm against callers).
void querySquarePointLauncher(int b, int n, int m, float grid_size, int nsample, const float *all_xyz, const float *centroids_xyz, const float *limits, const int *sizes, int *idx, int *pts_cnt, unsigned int *d_keys, unsigned int *d_vals, unsigned int *d_queries, uint2 *d_vals_multivalue) {
    // One (cell key, point index) pair per input point.
    unsigned int kInputSize = b * n;
    compose_insert_items<<<b,256>>>(b, n, grid_size, all_xyz, limits, sizes, d_keys, d_vals);
    cudaDeviceSynchronize();
    CUDPPHandle theCudpp;
    CUDPPResult result = cudppCreate(&theCudpp);
    if (result != CUDPP_SUCCESS){
        fprintf(stderr, "Error initializing CUDPP Library.\n");
        exit(-1);
    }
    // Multivalue table: one key (grid cell) can map to many point indices.
    CUDPPHashTableConfig config;
    config.type = CUDPP_MULTIVALUE_HASH_TABLE;
    config.kInputSize = kInputSize;
    config.space_usage = 2.0f;
    CUDPPHandle hash_table_handle;
    result = cudppHashTable(theCudpp, &hash_table_handle, &config);
    if (result != CUDPP_SUCCESS){
        fprintf(stderr, "Error in cudppHashTable call in"
                "testHashTable (make sure your device is at"
                "least compute version 2.0\n");
    }
    result = cudppHashInsert(hash_table_handle, d_keys,
                             d_vals, kInputSize);
    // cudaThreadSynchronize() is deprecated (and removed in recent CUDA
    // releases); cudaDeviceSynchronize() is the drop-in replacement.
    cudaDeviceSynchronize();
    if (result != CUDPP_SUCCESS){
        fprintf(stderr, "Error in cudppHashInsert call in"
                "testHashTable\n");
    }
    unsigned int values_size;
    if (cudppMultivalueHashGetValuesSize(hash_table_handle,
                                         &values_size) !=
        CUDPP_SUCCESS){
        fprintf(stderr, "Error: "
                "cudppMultivalueHashGetValuesSize()\n");
    }
    // d_all_values is owned by the table; freed by cudppDestroyHashTable.
    unsigned int * d_all_values = NULL;
    if (cudppMultivalueHashGetAllValues(hash_table_handle,
                                        &d_all_values) !=
        CUDPP_SUCCESS){
        fprintf(stderr, "Error: "
                "cudppMultivalueHashGetAllValues()\n");
    }
    // 27 cell keys per centroid (3x3x3 neighborhood).
    compose_queries<<<b,256>>>(b, m, grid_size, centroids_xyz, limits, sizes, d_queries);
    cudaDeviceSynchronize();
    result = cudppHashRetrieve(hash_table_handle,
                               d_queries,
                               d_vals_multivalue,
                               b * m * 27);
    cudaDeviceSynchronize();
    if (result != CUDPP_SUCCESS){
        fprintf(stderr, "Error in cudppHashRetrieve call\n");
    }
    // Expand the retrieved (offset, count) pairs into per-centroid lists.
    hash_square_idx_gpu<<<b,256>>>(b, n, m, nsample, d_vals_multivalue, d_all_values, idx, pts_cnt);
    cudaDeviceSynchronize();
    /// -------------------------------------------- Free the table.
    result = cudppDestroyHashTable(theCudpp, hash_table_handle);
    if (result != CUDPP_SUCCESS){
        fprintf(stderr, "Error in cudppDestroyHashTable call in"
                "testHashTable\n");
    }
    result = cudppDestroy(theCudpp);
    if (result != CUDPP_SUCCESS){
        printf("Error shutting down CUDPP Library.\n");
    }
}
// Host wrapper: launches assign_idx_gpu with one block per batch element and
// 256 threads per block, on the default stream (asynchronous).
void assignIdxLauncher(int b, int n, int m, int nsample, const bool *true_mat, int *idx){
assign_idx_gpu<<<b, 256>>>(b,n,m,nsample,true_mat,idx);
}
// Host wrapper: radius ball query; asynchronous (no device sync before return).
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
// printf("before queryBallPointLauncher\n");
query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
// printf("after queryBallPointLauncher\n");
//cudaDeviceSynchronize();
}
// Host wrapper: per-row partial selection sort of the first k distances.
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
selection_sort_gpu<<<b,256>>>(b,n,m,k,dist,outi,out);
//cudaDeviceSynchronize();
}
// Host wrapper: gathers point features by index into out (b,m,nsample,c).
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
// printf("before groupPointLauncher\n");
group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out);
// printf("after groupPointLauncher\n");
//cudaDeviceSynchronize();
}
// Host wrapper: scatters grouped gradients back into grad_points via atomicAdd.
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){
group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
//group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
//cudaDeviceSynchronize();
}
|
d99d9e22b1eed61123072e830a18e6bbfb0b7fdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
#include "opt_2dhisto.h"
//----------------------------------------------------------------------------
// Kernel Function_1_Main_computation
//----------------------------------------------------------------------------
// Saturating atomic add on a 32-bit counter, capped at UINT8_MAX (255).
// Classic atomicCAS retry loop: on each failed CAS the freshly observed
// value is re-checked, val is re-clamped so the stored value never exceeds
// 255, and the loop bails out early once the counter is already saturated.
// NOTE(review): the active kernel below uses plain atomicAdd; this helper is
// only referenced from commented-out code.
__device__ void atomicADD(uint32_t *address, uint32_t val) {
unsigned int *address_as_uint = (unsigned int *)address;
unsigned int old = *address_as_uint, assumed;
do {
if(old==UINT8_MAX)
break;
if(old+val>UINT8_MAX)
val = 255-old;
assumed = old;
old = atomicCAS(address_as_uint, assumed, assumed+val);
} while(old != assumed);
}
// Histogram kernel: one thread per input element; each thread bumps the bin
// selected by its input value, saturating at 255.
__global__ void opt_2dhisto_kernel(uint32_t *input_device, int input_size, uint32_t *device_bins)
{
    // The host launches a ceil-divided grid, so trailing threads of the last
    // block fall past the end of the input and must be masked off; the
    // original kernel read input_device[i] out of bounds for those threads.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= input_size)
        return;
    // Saturate at 255. The check-then-add pair is not atomic as a whole, so
    // a bin can briefly overshoot 255 under contention; CopyToHost clamps
    // the final values to UINT8_MAX, preserving the saturation semantics.
    if (device_bins[input_device[i]] < 255)
        atomicAdd(&(device_bins[input_device[i]]), 1);
}
//----------------------------------------------------------------------------
// HISTO_function
//----------------------------------------------------------------------------
// Resets the bins, launches the histogram kernel over the full
// INPUT_HEIGHT x INPUT_WIDTH input, then synchronizes so launch/execution
// errors surface here.
void opt_2dhisto(uint32_t *input_device, uint32_t *device_bins)
{
/* This function should only contain a call to the GPU
histogramming kernel. Any memory allocations and
transfers must be done outside this function */
int size = INPUT_HEIGHT * INPUT_WIDTH;
hipError_t cuda_ret;
// zero all HISTO_WIDTH 32-bit bins before accumulating
hipMemset(device_bins, 0, sizeof(uint32_t) * HISTO_WIDTH);
dim3 dimBlock(BLOCK_SIZE, 1,1);
// ceil-divide so every input element is covered by a thread
dim3 dimGrid( ((INPUT_HEIGHT * INPUT_WIDTH + dimBlock.x - 1) / dimBlock.x) ,1,1);
hipLaunchKernelGGL(( opt_2dhisto_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_device, size, device_bins);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) printf("Unable to launch/execute kernel \n");
}
/* Include below the implementation of any other functions you need */
// Allocates histo_width * histo_height * size_of_element bytes of device
// memory and returns the raw pointer (caller releases it via free_cuda).
uint8_t *AllocateDeviceMemory(int histo_width, int histo_height, int size_of_element)
{
uint8_t *t_memory;
hipMalloc((void **)&t_memory, histo_width * histo_height * size_of_element);
return t_memory;
}
// Releases device memory obtained from AllocateDeviceMemory.
void free_cuda(uint32_t *ptr)
{
hipFree(ptr);
}
// Copies a 2D host image into a densely packed device buffer row by row.
// Host rows are padded to (input_width + 128) & 0xFFFFFF80 elements --
// presumably matching the padded layout produced by the input allocator in
// util.h (TODO confirm) -- so each per-row copy strips the padding.
void CopyToDevice(uint32_t *device, uint32_t *host, uint32_t input_height, uint32_t input_width, int size_of_element)
{
const size_t x_size_padded = (input_width + 128) & 0xFFFFFF80;
size_t row_size = input_width * size_of_element;
for(int i=0; i<input_height; i++)
{
hipMemcpy(device, host, row_size, hipMemcpyHostToDevice);
device += input_width;
host += (x_size_padded);
}
}
// Copies `size` bytes of 32-bit bins back to the host, then clamps every
// HISTO_WIDTH * HISTO_HEIGHT bin to UINT8_MAX (saturating histogram).
void CopyToHost(uint32_t *host, uint32_t *device, int size)
{
hipMemcpy(host,device, size, hipMemcpyDeviceToHost);
for(int i = 0; i < HISTO_WIDTH * HISTO_HEIGHT; i++)
host[i] = host[i]>UINT8_MAX?UINT8_MAX:host[i];
}
// Thin wrapper over hipMemset (byte-wise fill of device memory).
void cuda_memset(uint32_t *ptr, uint32_t value, uint32_t byte_count)
{
hipMemset((void *)ptr, value, (size_t)byte_count);
}
| d99d9e22b1eed61123072e830a18e6bbfb0b7fdc.cu | #include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
#include "opt_2dhisto.h"
//----------------------------------------------------------------------------
// Kernel Function_1_Main_computation
//----------------------------------------------------------------------------
// Saturating atomic add on a 32-bit counter, capped at UINT8_MAX (255).
// Classic atomicCAS retry loop: on each failed CAS the freshly observed
// value is re-checked, val is re-clamped so the stored value never exceeds
// 255, and the loop bails out early once the counter is already saturated.
// NOTE(review): the active kernel below uses plain atomicAdd; this helper is
// only referenced from commented-out code.
__device__ void atomicADD(uint32_t *address, uint32_t val) {
unsigned int *address_as_uint = (unsigned int *)address;
unsigned int old = *address_as_uint, assumed;
do {
if(old==UINT8_MAX)
break;
if(old+val>UINT8_MAX)
val = 255-old;
assumed = old;
old = atomicCAS(address_as_uint, assumed, assumed+val);
} while(old != assumed);
}
// Histogram kernel: one thread per input element; each thread bumps the bin
// selected by its input value, saturating at 255.
__global__ void opt_2dhisto_kernel(uint32_t *input_device, int input_size, uint32_t *device_bins)
{
    // The host launches a ceil-divided grid, so trailing threads of the last
    // block fall past the end of the input and must be masked off; the
    // original kernel read input_device[i] out of bounds for those threads.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= input_size)
        return;
    // Saturate at 255. The check-then-add pair is not atomic as a whole, so
    // a bin can briefly overshoot 255 under contention; CopyToHost clamps
    // the final values to UINT8_MAX, preserving the saturation semantics.
    if (device_bins[input_device[i]] < 255)
        atomicAdd(&(device_bins[input_device[i]]), 1);
}
//----------------------------------------------------------------------------
// HISTO_function
//----------------------------------------------------------------------------
// Resets the bins, launches the histogram kernel over the full
// INPUT_HEIGHT x INPUT_WIDTH input, then synchronizes so launch/execution
// errors surface here.
void opt_2dhisto(uint32_t *input_device, uint32_t *device_bins)
{
/* This function should only contain a call to the GPU
histogramming kernel. Any memory allocations and
transfers must be done outside this function */
int size = INPUT_HEIGHT * INPUT_WIDTH;
cudaError_t cuda_ret;
// zero all HISTO_WIDTH 32-bit bins before accumulating
cudaMemset(device_bins, 0, sizeof(uint32_t) * HISTO_WIDTH);
dim3 dimBlock(BLOCK_SIZE, 1,1);
// ceil-divide so every input element is covered by a thread
dim3 dimGrid( ((INPUT_HEIGHT * INPUT_WIDTH + dimBlock.x - 1) / dimBlock.x) ,1,1);
opt_2dhisto_kernel<<<dimGrid, dimBlock>>>(input_device, size, device_bins);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) printf("Unable to launch/execute kernel \n");
}
/* Include below the implementation of any other functions you need */
// Allocates histo_width * histo_height * size_of_element bytes of device
// memory and returns the raw pointer (caller releases it via free_cuda).
uint8_t *AllocateDeviceMemory(int histo_width, int histo_height, int size_of_element)
{
uint8_t *t_memory;
cudaMalloc((void **)&t_memory, histo_width * histo_height * size_of_element);
return t_memory;
}
// Releases device memory obtained from AllocateDeviceMemory.
void free_cuda(uint32_t *ptr)
{
cudaFree(ptr);
}
// Copies a 2D host image into a densely packed device buffer row by row.
// Host rows are padded to (input_width + 128) & 0xFFFFFF80 elements --
// presumably matching the padded layout produced by the input allocator in
// util.h (TODO confirm) -- so each per-row copy strips the padding.
void CopyToDevice(uint32_t *device, uint32_t *host, uint32_t input_height, uint32_t input_width, int size_of_element)
{
const size_t x_size_padded = (input_width + 128) & 0xFFFFFF80;
size_t row_size = input_width * size_of_element;
for(int i=0; i<input_height; i++)
{
cudaMemcpy(device, host, row_size, cudaMemcpyHostToDevice);
device += input_width;
host += (x_size_padded);
}
}
// Copies `size` bytes of 32-bit bins back to the host, then clamps every
// HISTO_WIDTH * HISTO_HEIGHT bin to UINT8_MAX (saturating histogram).
void CopyToHost(uint32_t *host, uint32_t *device, int size)
{
cudaMemcpy(host,device, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < HISTO_WIDTH * HISTO_HEIGHT; i++)
host[i] = host[i]>UINT8_MAX?UINT8_MAX:host[i];
}
// Thin wrapper over cudaMemset (byte-wise fill of device memory).
void cuda_memset(uint32_t *ptr, uint32_t value, uint32_t byte_count)
{
cudaMemset((void *)ptr, value, (size_t)byte_count);
}
|
3c0fb80035c46a225f50de81bcbef19647e7a10f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/layer_norm.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kColwiseReduceTileSize = 32;
// Per-row mean and reciprocal standard deviation of X (M rows of length N).
// One block per row; each thread accumulates partial sums of x and x^2,
// which are block-reduced; thread 0 finalizes mean[i] and
// rstd[i] = rsqrt(max(var, 0) + eps). Accumulation is done in acc_type
// (float for half/bfloat16).
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC m_shared[C10_WARP_SIZE];
__shared__ T_ACC v_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum1 += static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]);
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared);
if (threadIdx.x == 0) {
const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N);
sum1 *= scale;
// variance clamped to 0 to guard against negative round-off
sum2 = c10::hip::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0));
mean[i] = sum1;
rstd[i] = c10::hip::compat::rsqrt(sum2 + static_cast<T_ACC>(eps));
}
}
// Forward pass: Y = (X - mean) * rstd * gamma + beta, row by row.
// One block per row; threads stride over the N columns. gamma/beta may be
// null, in which case they default to 1 and 0 respectively.
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
int64_t N,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
const T* beta,
T* Y) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
const T_ACC beta_v =
beta == nullptr ? T_ACC(0) : static_cast<T_ACC>(beta[j]);
Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) *
static_cast<T_ACC>(rstd[i]) * gamma_v +
beta_v;
}
}
// Backward helper: per-row reductions ds[i] = sum_j dY*X*gamma and
// db[i] = sum_j dY*gamma. One block per row; partial sums are block-reduced
// and written by thread 0. gamma == nullptr is treated as gamma == 1.
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
sum1 +=
static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v;
sum2 += static_cast<T_ACC>(dY[index]) * gamma_v;
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
if (threadIdx.x == 0) {
ds[i] = sum1;
db[i] = sum2;
}
}
// Backward helper: converts the per-row sums (ds, db) into the fused
// per-row coefficients c1, c2 consumed by LayerNormBackwardCUDAKenrel.
// One thread per row (1D grid of M threads total).
template <typename T>
__global__ void ComputeGradientFusedParamsCUDAKernel(
int64_t M,
int64_t N,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
acc_type<T, true>* c1,
acc_type<T, true>* c2) {
using T_ACC = acc_type<T, true>;
const int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < M) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N);
const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) *
static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) *
static_cast<T_ACC>(rstd[index]) * s;
c1[index] = a;
c2[index] =
-(a * static_cast<T_ACC>(mean[index]) +
db[index] * static_cast<T_ACC>(rstd[index]) * s);
}
}
// Backward pass for dX: dX = a[i]*dY*gamma + b[i]*X + c[i], row by row.
// One block per row; threads stride over columns. gamma == nullptr is
// treated as 1. NOTE(review): the 'Kenrel' misspelling is kept because the
// launch site below references this exact name.
template <typename T>
__global__ void LayerNormBackwardCUDAKenrel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* a,
const acc_type<T, true>* b,
const acc_type<T, true>* c,
T* dX) {
using T_ACC = acc_type<T, true>;
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
dX[index] =
static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v +
b[i] * static_cast<T_ACC>(X[index]) + c[i];
}
}
// Gamma/beta gradients, simple variant: one thread per column j performs a
// serial reduction over all M rows. dg and db are each optional (nullptr
// skips that output). Used by the launcher when M < 512.
template <typename T>
__global__ void GammaBetaBackwardSimpleCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dg,
T* db) {
using T_ACC = acc_type<T, true>;
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < N) {
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = 0; i < M; ++i) {
const int64_t index = i * N + j;
sum1 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index]) *
(static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) *
static_cast<T_ACC>(rstd[i]);
sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]);
}
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
// Gamma/beta gradients, tiled variant for large M. Launched with a
// (kThreadX=32, kThreadY=16) block per 32-column tile: each thread
// accumulates two row strips (offset by blockDim.y), partials are staged in
// 32x33 shared tiles (the +1 column pad avoids shared-memory bank conflicts
// on the transposed read), then each warp reduces one transposed row and
// lane 0 writes the column result. dg/db are each optional (nullptr skips).
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dg,
T* db) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
__shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (j < N) {
// two-way unrolled row loop: rows i and i + blockDim.y per iteration
for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) {
const int64_t i1 = i;
const int64_t i2 = i + blockDim.y;
const int64_t index1 = i1 * N + j;
const int64_t index2 = i2 * N + j;
dg_sum1 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index1]) *
(static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) *
static_cast<T_ACC>(rstd[i1]);
db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]);
if (i2 < M) {
dg_sum2 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index2]) *
(static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) *
static_cast<T_ACC>(rstd[i2]);
db_sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]);
}
}
}
// stage partials; the two strips fill rows [0, blockDim.y) and
// [blockDim.y, 2*blockDim.y) of the tiles
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// transposed read: each warp now holds the partials of one column
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
// second half of the tile (columns covered by the second strip rows)
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
}
// Host-side forward implementation: validates shapes, then launches the
// rowwise-moments kernel followed by the normalization kernel on the
// current HIP stream (one block per row each).
template <typename T>
void LayerNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
T eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
DCHECK_EQ(X.numel(), M * N);
DCHECK(!gamma.defined() || gamma.numel() == N);
DCHECK(!beta.defined() || beta.numel() == N);
const T* X_data = X.data_ptr<T>();
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T* Y_data = Y->data_ptr<T>();
T* mean_data = mean->data_ptr<T>();
T* rstd_data = rstd->data_ptr<T>();
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream,
N, eps, X_data, mean_data, rstd_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream,
N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Dtype-dispatch wrapper: routes float/double/half/bfloat16 inputs to the
// templated forward implementation.
void LayerNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
double eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormKernelImpl", [&]() {
LayerNormKernelImplInternal<scalar_t>(
X, gamma, beta, M, N, static_cast<scalar_t>(eps), Y, mean, rstd);
});
}
// Host-side backward implementation. When dX is requested it allocates
// accumulator buffers (in float for half/bfloat16 inputs) and chains three
// kernels: per-row sums -> fused per-row coefficients -> dX. When dgamma or
// dbeta is requested it picks the simple columnwise kernel for M < 512 and
// the tiled shared-memory kernel otherwise. All launches go to the current
// HIP stream.
template <typename T>
void LayerNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
using T_ACC = acc_type<T, true>;
DCHECK_EQ(dY.numel(), M * N);
DCHECK_EQ(X.numel(), M * N);
DCHECK_EQ(mean.numel(), M);
DCHECK_EQ(rstd.numel(), M);
DCHECK(!gamma.defined() || gamma.numel() == N);
const T* dY_data = dY.template data_ptr<T>();
const T* X_data = X.template data_ptr<T>();
const T* mean_data = mean.template data_ptr<T>();
const T* rstd_data = rstd.template data_ptr<T>();
const T* gamma_data =
gamma.defined() ? gamma.template data_ptr<T>() : nullptr;
T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr;
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (dX_data != nullptr) {
// accumulate in float for reduced-precision inputs
const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type();
Tensor ds = at::empty({M}, X.options().dtype(kAccType));
Tensor db = at::empty({M}, X.options().dtype(kAccType));
Tensor scale = at::empty({M}, X.options().dtype(kAccType));
Tensor bias = at::empty({M}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.template data_ptr<T_ACC>();
T_ACC* db_data = db.template data_ptr<T_ACC>();
T_ACC* scale_data = scale.template data_ptr<T_ACC>();
T_ACC* bias_data = bias.template data_ptr<T_ACC>();
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>)
, dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream,
N, dY_data, X_data, gamma_data, ds_data, db_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( ComputeGradientFusedParamsCUDAKernel<T>)
, dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
M,
N,
mean_data,
rstd_data,
ds_data,
db_data,
scale_data,
bias_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( LayerNormBackwardCUDAKenrel<T>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream,
N,
dY_data,
X_data,
gamma_data,
rstd_data,
scale_data,
bias_data,
dX_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
if (dgamma->defined() || dbeta->defined()) {
T* dgamma_data =
dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr;
T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr;
if (M < 512) {
// For small batch size, do colwise reduce directly.
const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( GammaBetaBackwardSimpleCUDAKernel<T>)
, dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B =
(N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize;
constexpr int kThreadX = kColwiseReduceTileSize;
constexpr int kThreadY = kColwiseReduceTileSize / 2;
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<T>)
, dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream,
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
}
// Dtype-dispatch wrapper: routes float/double/half/bfloat16 gradients to the
// templated backward implementation.
void LayerNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() {
LayerNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, M, N, dX, dgamma, dbeta);
});
}
} // namespace
// ATen entry point: layer norm forward on CUDA/HIP. Flattens the input into
// an (M, N) view via _prepare_layer_norm_inputs, allocates Y/mean/rstd, runs
// the kernels (only when M > 0), and reshapes the statistics so they
// broadcast against the input (leading dims kept, normalized dims set to 1).
// Returns (Y, mean, rstd).
std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda(
const Tensor& input,
IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
double eps) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor Y = at::native::empty_like(
X,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor mean = at::empty({M}, X.options());
Tensor rstd = at::empty({M}, X.options());
if (M > 0) {
LayerNormKernelImpl(X, gamma, beta, M, N, eps, &Y, &mean, &rstd);
const auto input_shape = input.sizes();
// axis = first normalized dimension of the original input
const size_t axis = input.dim() - normalized_shape.size();
std::vector<int64_t> stat_shape;
for (size_t idx = 0; idx < axis; ++idx) {
stat_shape.push_back(input_shape[idx]);
}
for (size_t idx = axis; idx < input.dim(); ++idx) {
stat_shape.push_back(1);
}
mean = mean.view(stat_shape);
rstd = rstd.view(stat_shape);
}
return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd));
}
// Backward LayerNorm entry (HIP build). Returns (dX, dgamma, dbeta); each is
// only materialized when the corresponding grad_input_mask flag is set.
// For M == 0 the parameter grads must still be well-defined, hence the
// zeros_like branches below.
std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda(
const Tensor& dY,
const Tensor& input,
IntArrayRef normalized_shape,
const Tensor& mean,
const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
std::array<bool, 3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
// Flattens input to an [M, N] view and validates gamma/beta shapes.
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor dX;
Tensor dgamma;
Tensor dbeta;
if (grad_input_mask[0]) {
dX = at::native::empty_like(
X,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[1]) {
// empty_like when a kernel will fill it (M > 0), zeros_like otherwise so
// the returned gradient is valid without a kernel launch.
dgamma = M > 0 ? at::native::empty_like(
gamma,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT)
: at::native::zeros_like(
gamma,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[2]) {
// Same empty/zeros split as dgamma above.
dbeta = M > 0 ? at::native::empty_like(
beta,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT)
: at::native::zeros_like(
beta,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (M > 0) {
LayerNormBackwardKernelImpl(
dY, X, mean, rstd, gamma, M, N, &dX, &dgamma, &dbeta);
}
return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta));
}
REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl);
REGISTER_DISPATCH(LayerNormBackwardKernel, &LayerNormBackwardKernelImpl);
} // namespace native
} // namespace at
| 3c0fb80035c46a225f50de81bcbef19647e7a10f.cu | #include <ATen/native/layer_norm.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kColwiseReduceTileSize = 32;
// Computes per-row mean and inverse standard deviation for an [gridDim.x, N]
// row-major matrix X. One block handles one row: threads stride over the row
// accumulating sum(x) and sum(x^2), then a block-wide reduction combines them.
// Thread 0 writes mean[i] and rstd[i] = 1/sqrt(var + eps); variance is clamped
// at 0 to guard against negative round-off from the E[x^2] - E[x]^2 formula.
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
// Scratch for BlockReduceSum (one slot per warp).
__shared__ T_ACC m_shared[C10_WARP_SIZE];
__shared__ T_ACC v_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum1 += static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]);
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared);
if (threadIdx.x == 0) {
const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N);
sum1 *= scale;
// max(..., 0) clamps tiny negative variances caused by cancellation.
sum2 = c10::cuda::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0));
mean[i] = sum1;
rstd[i] = c10::cuda::compat::rsqrt(sum2 + static_cast<T_ACC>(eps));
}
}
// Applies the affine layer-norm transform, one row per block:
//   Y[row][col] = (X[row][col] - mean[row]) * rstd[row] * gamma[col] + beta[col]
// gamma/beta may be null; they then default to 1 and 0 respectively.
// Launch: gridDim.x == number of rows; threads stride over the N columns.
template <typename T>
__global__ void LayerNormForwardCUDAKernel(
    int64_t N,
    const T* X,
    const T* mean,
    const T* rstd,
    const T* gamma,
    const T* beta,
    T* Y) {
  using T_ACC = acc_type<T, true>;
  const int64_t row = blockIdx.x;
  for (int64_t col = threadIdx.x; col < N; col += blockDim.x) {
    const int64_t k = row * N + col;
    const T_ACC g = (gamma == nullptr) ? T_ACC(1) : static_cast<T_ACC>(gamma[col]);
    const T_ACC b = (beta == nullptr) ? T_ACC(0) : static_cast<T_ACC>(beta[col]);
    const T_ACC x_hat =
        (static_cast<T_ACC>(X[k]) - static_cast<T_ACC>(mean[row])) *
        static_cast<T_ACC>(rstd[row]);
    Y[k] = x_hat * g + b;
  }
}
// Per-row partial sums used by the LayerNorm backward pass. One block per row:
//   ds[i] = sum_j dY[i][j] * X[i][j] * gamma[j]
//   db[i] = sum_j dY[i][j] * gamma[j]
// gamma == nullptr is treated as gamma[j] == 1.
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
// Scratch for BlockReduceSum (one slot per warp).
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
const int64_t i = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]);
sum1 +=
static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v;
sum2 += static_cast<T_ACC>(dY[index]) * gamma_v;
}
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
if (threadIdx.x == 0) {
ds[i] = sum1;
db[i] = sum2;
}
}
// Folds the per-row partials ds/db into the two fused coefficients used by the
// dX kernel:
//   c1[i] = (db[i]*mean[i] - ds[i]) * rstd[i]^3 / N
//   c2[i] = -(c1[i]*mean[i] + db[i]*rstd[i] / N)
// One thread per row; launched as a 1-D grid covering M rows.
template <typename T>
__global__ void ComputeGradientFusedParamsCUDAKernel(
    int64_t M,
    int64_t N,
    const T* mean,
    const T* rstd,
    const acc_type<T, true>* ds,
    const acc_type<T, true>* db,
    acc_type<T, true>* c1,
    acc_type<T, true>* c2) {
  using T_ACC = acc_type<T, true>;
  const int64_t row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= M) {
    return;
  }
  const T_ACC inv_n = T_ACC(1) / static_cast<T_ACC>(N);
  const T_ACC u = static_cast<T_ACC>(mean[row]);
  const T_ACC r = static_cast<T_ACC>(rstd[row]);
  const T_ACC a = (db[row] * u - ds[row]) * r * r * r * inv_n;
  c1[row] = a;
  c2[row] = -(a * u + db[row] * r * inv_n);
}
// Final dX stage of the LayerNorm backward pass, one row per block:
//   dX[row][col] = a[row] * dY[row][col] * gamma[col] + b[row] * X[row][col] + c[row]
// where a is rstd and b/c are the fused coefficients from
// ComputeGradientFusedParamsCUDAKernel. gamma == nullptr acts as all-ones.
// (The "Kenrel" spelling is a historical typo; kept because callers use it.)
template <typename T>
__global__ void LayerNormBackwardCUDAKenrel(
    int64_t N,
    const T* dY,
    const T* X,
    const T* gamma,
    const T* a,
    const acc_type<T, true>* b,
    const acc_type<T, true>* c,
    T* dX) {
  using T_ACC = acc_type<T, true>;
  const int64_t row = blockIdx.x;
  for (int64_t col = threadIdx.x; col < N; col += blockDim.x) {
    const int64_t k = row * N + col;
    const T_ACC g = (gamma == nullptr) ? T_ACC(1) : static_cast<T_ACC>(gamma[col]);
    dX[k] =
        static_cast<T_ACC>(a[row]) * static_cast<T_ACC>(dY[k]) * g +
        b[row] * static_cast<T_ACC>(X[k]) + c[row];
  }
}
// Column-wise reduction for dgamma/dbeta, used when M is small. One thread per
// column walks all M rows serially:
//   dg[j] = sum_i dY[i][j] * (X[i][j] - mean[i]) * rstd[i]
//   db[j] = sum_i dY[i][j]
// Either output pointer may be null, in which case that sum is skipped.
template <typename T>
__global__ void GammaBetaBackwardSimpleCUDAKernel(
    int64_t M,
    int64_t N,
    const T* dY,
    const T* X,
    const T* mean,
    const T* rstd,
    T* dg,
    T* db) {
  using T_ACC = acc_type<T, true>;
  const int64_t col = blockIdx.x * blockDim.x + threadIdx.x;
  if (col >= N) {
    return;
  }
  T_ACC dg_acc = 0;
  T_ACC db_acc = 0;
  for (int64_t row = 0; row < M; ++row) {
    const int64_t k = row * N + col;
    if (dg != nullptr) {
      dg_acc += static_cast<T_ACC>(dY[k]) *
          (static_cast<T_ACC>(X[k]) - static_cast<T_ACC>(mean[row])) *
          static_cast<T_ACC>(rstd[row]);
    }
    if (db != nullptr) {
      db_acc += static_cast<T_ACC>(dY[k]);
    }
  }
  if (dg != nullptr) {
    dg[col] = dg_acc;
  }
  if (db != nullptr) {
    db[col] = db_acc;
  }
}
// Tiled column-wise reduction for dgamma/dbeta, used for large M.
// Launch shape (from the caller): block = (kColwiseReduceTileSize,
// kColwiseReduceTileSize / 2), grid covers N columns in tile-wide strips.
// Each thread accumulates two row-strided partial sums (dg_sum1/dg_sum2 for
// rows i and i + blockDim.y), the tile is staged in shared memory, then read
// back transposed so each warp can reduce one column's partials with
// WarpReduceSum. Either output pointer may be null.
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dg,
T* db) {
using T_ACC = acc_type<T, true>;
// +1 column of padding — presumably to avoid shared-memory bank conflicts
// on the transposed reads below.
__shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
__shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1];
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (j < N) {
// Each iteration consumes two rows: i and i + blockDim.y.
for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) {
const int64_t i1 = i;
const int64_t i2 = i + blockDim.y;
const int64_t index1 = i1 * N + j;
const int64_t index2 = i2 * N + j;
dg_sum1 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index1]) *
(static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) *
static_cast<T_ACC>(rstd[i1]);
db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]);
if (i2 < M) {
dg_sum2 += dg == nullptr ? T_ACC(0)
: static_cast<T_ACC>(dY[index2]) *
(static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) *
static_cast<T_ACC>(rstd[i2]);
db_sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]);
}
}
}
// Stage both partials; rows [0, blockDim.y) hold sum1, rows
// [blockDim.y, 2*blockDim.y) hold sum2.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Transposed read: lanes of one warp now hold the partials of one column.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
// This half of the tile covers columns blockIdx.x*blockDim.x + threadIdx.y.
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
// Second half of the tile (columns offset by blockDim.y).
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum(sum1);
sum2 = cuda_utils::WarpReduceSum(sum2);
if (threadIdx.x == 0) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (j < N) {
if (dg != nullptr) {
dg[j] = sum1;
}
if (db != nullptr) {
db[j] = sum2;
}
}
}
}
// Host-side launcher for the forward pass: computes per-row moments, then
// applies the normalization. X is treated as an [M, N] row-major matrix;
// Y/mean/rstd must be preallocated ([M, N], [M], [M]).
template <typename T>
void LayerNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
T eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
DCHECK_EQ(X.numel(), M * N);
DCHECK(!gamma.defined() || gamma.numel() == N);
DCHECK(!beta.defined() || beta.numel() == N);
const T* X_data = X.data_ptr<T>();
// Undefined gamma/beta become nullptr; the kernels substitute 1 / 0.
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T* Y_data = Y->data_ptr<T>();
T* mean_data = mean->data_ptr<T>();
T* rstd_data = rstd->data_ptr<T>();
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
// One block per row for both kernels.
RowwiseMomentsCUDAKernel<T>
<<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>(
N, eps, X_data, mean_data, rstd_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
LayerNormForwardCUDAKernel<T><<<M, kCUDANumThreads, 0, cuda_stream>>>(
N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Type-dispatched entry point for the LayerNorm forward pass. Dispatches on
// X's scalar type (floating types plus Half/BFloat16); eps is narrowed to the
// dispatched scalar type before the launch.
void LayerNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t M,
int64_t N,
double eps,
Tensor* Y,
Tensor* mean,
Tensor* rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormKernelImpl", [&]() {
LayerNormKernelImplInternal<scalar_t>(
X, gamma, beta, M, N, static_cast<scalar_t>(eps), Y, mean, rstd);
});
}
// Host-side launcher for the backward pass. Three stages for dX (row partials,
// fused coefficients, final dX), then dgamma/dbeta via one of two column-wise
// reduction strategies picked by M. Outputs are only written when the
// corresponding Tensor* is defined.
template <typename T>
void LayerNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
using T_ACC = acc_type<T, true>;
DCHECK_EQ(dY.numel(), M * N);
DCHECK_EQ(X.numel(), M * N);
DCHECK_EQ(mean.numel(), M);
DCHECK_EQ(rstd.numel(), M);
DCHECK(!gamma.defined() || gamma.numel() == N);
const T* dY_data = dY.template data_ptr<T>();
const T* X_data = X.template data_ptr<T>();
const T* mean_data = mean.template data_ptr<T>();
const T* rstd_data = rstd.template data_ptr<T>();
const T* gamma_data =
gamma.defined() ? gamma.template data_ptr<T>() : nullptr;
T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr;
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
if (dX_data != nullptr) {
// Reduced-precision inputs accumulate in float; see T_ACC/acc_type.
const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? kFloat : X.scalar_type();
// Per-row scratch: partial sums (ds, db) and fused coefficients
// (scale, bias) consumed by the final dX kernel.
Tensor ds = at::empty({M}, X.options().dtype(kAccType));
Tensor db = at::empty({M}, X.options().dtype(kAccType));
Tensor scale = at::empty({M}, X.options().dtype(kAccType));
Tensor bias = at::empty({M}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.template data_ptr<T_ACC>();
T_ACC* db_data = db.template data_ptr<T_ACC>();
T_ACC* scale_data = scale.template data_ptr<T_ACC>();
T_ACC* bias_data = bias.template data_ptr<T_ACC>();
// Stage 1: per-row partial sums, one block per row.
ComputeInternalGradientsCUDAKernel<T>
<<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>(
N, dY_data, X_data, gamma_data, ds_data, db_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Stage 2: fold partials into fused coefficients, one thread per row.
const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads;
ComputeGradientFusedParamsCUDAKernel<T>
<<<B, kCUDANumThreads, 0, cuda_stream>>>(
M,
N,
mean_data,
rstd_data,
ds_data,
db_data,
scale_data,
bias_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Stage 3: compute dX, one block per row.
LayerNormBackwardCUDAKenrel<T><<<M, kCUDANumThreads, 0, cuda_stream>>>(
N,
dY_data,
X_data,
gamma_data,
rstd_data,
scale_data,
bias_data,
dX_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
if (dgamma->defined() || dbeta->defined()) {
T* dgamma_data =
dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr;
T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr;
if (M < 512) {
// For small batch size, do colwise reduce directly.
const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads;
GammaBetaBackwardSimpleCUDAKernel<T>
<<<B, kCUDANumThreads, 0, cuda_stream>>>(
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
// Larger M: tiled reduction; block shape must match the kernel's
// shared-memory tile layout.
const int64_t B =
(N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize;
constexpr int kThreadX = kColwiseReduceTileSize;
constexpr int kThreadY = kColwiseReduceTileSize / 2;
GammaBetaBackwardCUDAKernel<T>
<<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>(
M,
N,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
// Type-dispatched entry point for the LayerNorm backward pass. Dispatches on
// X's scalar type (floating types plus Half/BFloat16) and forwards all
// arguments to the templated internal implementation.
void LayerNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t M,
int64_t N,
Tensor* dX,
Tensor* dgamma,
Tensor* dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() {
LayerNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, M, N, dX, dgamma, dbeta);
});
}
} // namespace
// Forward LayerNorm entry. Returns (Y, mean, rstd); mean/rstd are reshaped so
// leading axes match `input` and normalized axes are size-1 (broadcastable in
// the backward pass).
std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda(
const Tensor& input,
IntArrayRef normalized_shape, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
double eps) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
// Flattens input to an [M, N] view and validates gamma/beta shapes.
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor Y = at::native::empty_like(
X,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor mean = at::empty({M}, X.options());
Tensor rstd = at::empty({M}, X.options());
if (M > 0) {
LayerNormKernelImpl(X, gamma, beta, M, N, eps, &Y, &mean, &rstd);
const auto input_shape = input.sizes();
// First axis belonging to normalized_shape.
const size_t axis = input.dim() - normalized_shape.size();
std::vector<int64_t> stat_shape;
for (size_t idx = 0; idx < axis; ++idx) {
stat_shape.push_back(input_shape[idx]);
}
// NOTE(review): size_t vs int64_t comparison; benign since dim() >= 0.
for (size_t idx = axis; idx < input.dim(); ++idx) {
stat_shape.push_back(1);
}
mean = mean.view(stat_shape);
rstd = rstd.view(stat_shape);
}
return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd));
}
// Backward LayerNorm entry. Returns (dX, dgamma, dbeta); each is only
// materialized when the corresponding grad_input_mask flag is set. For M == 0
// the parameter grads are returned as zeros (no kernel launch).
std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda(
const Tensor& dY,
const Tensor& input,
IntArrayRef normalized_shape,
const Tensor& mean,
const Tensor& rstd, const c10::optional<Tensor>& weight_opt /* optional */, const c10::optional<Tensor>& bias_opt /* optional */,
std::array<bool, 3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();});
const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();});
// Flattens input to an [M, N] view and validates gamma/beta shapes.
auto inputs = _prepare_layer_norm_inputs(input, normalized_shape, weight, bias);
auto X = std::get<0>(inputs);
auto gamma = std::get<1>(inputs);
auto beta = std::get<2>(inputs);
auto M = std::get<3>(inputs);
auto N = std::get<4>(inputs);
Tensor dX;
Tensor dgamma;
Tensor dbeta;
if (grad_input_mask[0]) {
dX = at::native::empty_like(
X,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[1]) {
// empty_like when a kernel will fill it (M > 0), zeros_like otherwise.
dgamma = M > 0 ? at::native::empty_like(
gamma,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT)
: at::native::zeros_like(
gamma,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (grad_input_mask[2]) {
// Same empty/zeros split as dgamma above.
dbeta = M > 0 ? at::native::empty_like(
beta,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT)
: at::native::zeros_like(
beta,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
if (M > 0) {
LayerNormBackwardKernelImpl(
dY, X, mean, rstd, gamma, M, N, &dX, &dgamma, &dbeta);
}
return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta));
}
REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl);
REGISTER_DISPATCH(LayerNormBackwardKernel, &LayerNormBackwardKernelImpl);
} // namespace native
} // namespace at
|
8b7c095afacc50dd86bfdd6cc311d465790ea630.hip | // !!! This is a file automatically generated by hipify!!!
#include <f/device/device_assert/cuda_assert.hpp>
#include <f/device/device_assert/cublas_assert.hpp>
#include <f/device/device_assert/kernel_assert.hpp>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_complex.h>
#include <math_functions.h>
// Euclidean norm of an m-element complex vector: *dxnorm = sqrt(sum |dA[j]|^2).
// Must be launched as <<<1, 128>>> — the shared-memory tree reduction below
// hard-codes 128 threads. Thread 0 writes the final result.
__global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )// Dznrm2<<<1,128>>>(...)
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
// Each thread accumulates a strided partial sum of squared magnitudes.
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += re*re + im*im;
}
x[i] = lsum;
__syncthreads();
// Power-of-two tree reduction; __syncthreads() after every halving keeps
// the shared-memory accesses ordered.
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = sqrt(x[0]);
}
// Serial device-side variant: stores the SUM OF SQUARED magnitudes in
// dxnorm[0]. NOTE(review): unlike the Dznrm2 kernel above, no sqrt is applied
// — the caller appears to compensate in its scaling constant; confirm before
// "fixing" this to a true 2-norm.
__device__ void device_Dznrm2( unsigned long m, double2 *dA, double *dxnorm )
{
double ans = 0.0;
for ( unsigned long index = 0; index != m; ++index )
{
double const real = dA[index].x;
double const imag = dA[index].y;
ans += real*real + imag*imag;
}
dxnorm[0] = ans;
}
// Sum of complex magnitudes: *dxnorm = sum_j |dA[j]| (sqrt per element, no
// final sqrt). Must be launched as <<<1, 128>>> — the tree reduction below
// hard-codes 128 threads. Thread 0 writes the result.
__global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += sqrt(re*re + im*im);
}
x[i] = lsum;
__syncthreads();
// Power-of-two tree reduction, identical in shape to Dznrm2's.
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = x[0];
}
//should call with Zscale<<<1, 128>>>(...);
// In-place scaling of an m-element complex vector by a real factor.
// Launch as Zscal<<<1, 128>>>: the stride below hard-codes 128 threads.
__global__ void Zscal( unsigned long m, double real, double2* dA )
{
    for ( unsigned long idx = threadIdx.x; idx < m; idx += 128 )
    {
        double2 v = dA[idx];
        v.x *= real;
        v.y *= real;
        dA[idx] = v;
    }
}
// Serial in-place scaling of an m-element complex vector by a real factor.
// (The original author noted a `!=` loop condition misbehaved here, so the
// `<` comparison is deliberately kept.)
__device__ void device_Zscal( unsigned long m, double real, double2* dA )
{
    unsigned long idx = 0;
    while ( idx < m )
    {
        dA[idx].x *= real;
        dA[idx].y *= real;
        ++idx;
    }
}
//TODO: optimization
// Complex matrix multiply: P = alpha * M * N for dim x dim row-major matrices,
// tiled 16x16 through shared memory. Real and imaginary parts are staged in
// separate tiles (_M/_m for M, _N/_n for N). Launch with grid
// ((dim+15)/16, (dim+15)/16) and block (16, 16), as the header comment says.
__global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>>
void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha )
{
typedef double value_type;
typedef double2 complex_type;
typedef unsigned long size_type;
// [16][17]: the extra column presumably avoids shared-memory bank conflicts.
__shared__ value_type _M[16][17];
__shared__ value_type _m[16][17];
__shared__ value_type _N[16][17];
__shared__ value_type _n[16][17];
const size_type bx = blockIdx.x;
const size_type by = blockIdx.y;
const size_type tx = threadIdx.x;
const size_type ty = threadIdx.y;
const size_type row = by * 16 + ty;
const size_type col = bx * 16 + tx;
const size_type iter_n = (dim+15)/16;
// Accumulators for the real (R) and imaginary (I) parts of P[row][col].
value_type R = 0.0;
value_type I = 0.0;
for ( size_type i = 0; i != iter_n; ++i )
{
// Stage one 16x16 tile of M (zero-padded past the matrix edge).
if ( i * 16 + tx < dim && row < dim )
{
_M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x;
_m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y;
}
else
{
_M[ty][tx] = 0.0;
_m[ty][tx] = 0.0;
}
// Stage one 16x16 tile of N (zero-padded past the matrix edge).
if ( i * 16 + ty < dim && col < dim )
{
_N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x;
_n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y;
}
else
{
_N[ty][tx] = 0.0;
_n[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
// Complex multiply-accumulate over the shared tiles:
// (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
for ( size_type j = 0; j != 16; ++j )
{
R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx];
I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx];
}
// Barrier before the next iteration overwrites the tiles.
__syncthreads();
}
if ( row < dim && col < dim )
{
(*( P + row * dim + col )).x = alpha * R;
(*( P + row * dim + col )).y = alpha * I;
}
}
// Copies dims complex elements from src to dst.
// Launch as <<<1, 128>>>: the stride below hard-codes 128 threads.
__global__ void //<<<1,128>>>
Zcopy( unsigned long dims, double2* src, double2* dst )
{
    for ( unsigned long idx = threadIdx.x; idx < dims; idx += 128 )
    {
        dst[idx] = src[idx];
    }
}
// Serial device-side copy of dims complex elements from src to dst.
__device__ void
device_Zcopy( unsigned long dims, double2* src, double2* dst )
{
    for ( unsigned long k = 0; k < dims; ++k )
    {
        dst[k] = src[k];
    }
}
// Complex AXPY: dst[k] += (real + i*imag) * src[k] for k in [0, dims).
// Launch as <<<1, 128>>>: the stride below hard-codes 128 threads.
__global__ void//<<<1, 128>>>
Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
    for ( unsigned long k = threadIdx.x; k < dims; k += 128 )
    {
        double const sr = (*(src+k)).x;
        double const si = (*(src+k)).y;
        // (real + i*imag)(sr + i*si) = (real*sr - imag*si) + i(real*si + imag*sr)
        (*(dst+k)).x += real * sr - imag * si;
        (*(dst+k)).y += real * si + imag * sr;
    }
}
// Serial complex AXPY: dst[k] += (real + i*imag) * src[k] for k in [0, dims).
//
// BUG FIX: the previous body ASSIGNED the product to dst ("=") instead of
// accumulating it ("+="), contradicting both its own "dst += ..." comment and
// the behaviour of the __global__ Zaxpy kernel above. It now accumulates.
__device__ void//<<<1, 128>>>
device_Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
    for ( unsigned long index = 0; index < dims; ++index )
    {
        double const R = src[index].x;
        double const I = src[index].y;
        // (real + i*imag)(R + iI) = (real*R - imag*I) + i(real*I + imag*R)
        dst[index].x += real * R - imag * I;
        dst[index].y += real * I + imag * R;
    }
}
#if 0
// Builds the scattering matrix a from the potential coefficients ug: each
// off-diagonal entry becomes i * thickness * ug[ar[row][col]] (ug stores
// interleaved (real, imag) pairs, hence the 2*index addressing), and the
// diagonal becomes i * thickness * diag[row]. One thread per row.
// NOTE: this kernel is compiled out by the surrounding #if 0 / #endif; the
// live variant is device_compose_a below.
__global__ void
compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim )
{
int const row_index = threadIdx.x;
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
// i * t * (re + i*im) = (-t*im) + i*(t*re)
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) );
}
#endif
// Serial device-side construction of the scattering matrix a (dim x dim):
// off-diagonals are i * t * ug[ar[row][col]] with ug stored as interleaved
// (real, imag) pairs; the diagonal additionally folds in a tilt correction
// kx*alpha + ky*beta + gamma, with (kx, ky) read from the beams table for the
// row's reference index ar[row][0]. `thickness` is rescaled by 100 up front
// — presumably a unit conversion; confirm against the caller.
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double gamma, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
// i * t * (re + i*im) = (-t*im) + i*(t*re)
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
// beams rows are 10 doubles wide; fields 1 and 2 are used as (kx, ky).
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) + kx*alpha + ky*beta + gamma ) );
}
}
#if 0
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness * ( *(diag+row_index) + kx*alpha + ky*beta ) );
}
}
#endif
//TODO: optimization
#if 0
Comment:
When working with original global kernel 'extract_intensity_diff_with_offset_zigmoid', the generated residuals( all kinds ) are
a little bit smaller(1.0e-6 order) than the new device routine 'device_extract_intensity_diff_with_offset_zigmoid'
#endif
// For each row (one thread per row), compares the squared magnitude of column
// `column_index` of the dim x dim matrix s — scaled by ac_offset and shifted
// by dc_offset — against the experimental intensity I_exp, storing the raw
// difference in I_diff and a sigmoid-weighted difference in I_zigmoid.
// 12.56637... == 4*pi. ("zigmoid" is the codebase's spelling of sigmoid.)
__global__ void
extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
// cuCabs gives |s|, so norm*norm below is the intensity |s|^2.
double const norm = cuCabs(*(s+S_offset));
double const val = *(I_exp+I_offset);
double const df = val - norm * norm * ac_offset - dc_offset;
*(I_diff+I_offset) = df;
*(I_zigmoid+I_offset) = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) );
}
// Serial device-side variant of extract_intensity_diff_with_offset_zigmoid:
// for every row k, compares the squared magnitude of column `column_index` of
// the dim x dim matrix s (scaled by ac_offset, shifted by dc_offset) against
// I_exp[k]; writes the raw difference to I_diff and a sigmoid-weighted
// difference to I_zigmoid. 12.56637... == 4*pi.
__device__ void
device_extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
    for ( unsigned long k = 0; k < dim; ++k )
    {
        double2 const amp = s[column_index + k * dim];
        // |amp|^2 computed directly — no sqrt needed here.
        double const intensity = amp.x * amp.x + amp.y * amp.y;
        double const measured = I_exp[k];
        double const diff = measured - intensity * ac_offset - dc_offset;
        I_diff[k] = diff;
        I_zigmoid[k] = diff / ( 1.0 + exp( 12.56637061435917295384 * c * measured ) );
    }
}
//TODO: optimization
// Adds the complex constant (real + i*imag) to every diagonal entry of the
// dim x dim matrix a. Launch with one thread per diagonal element: <<<1, dim>>>.
__global__ void
sum_diag( double2* a, unsigned long dim, double real, double imag )
{
    unsigned long const d = threadIdx.x * dim + threadIdx.x;
    a[d].x += real;
    a[d].y += imag;
}
// Serial device-side variant: shifts every diagonal entry of the dim x dim
// matrix a by the complex constant (real + i*imag).
__device__ void
device_sum_diag( double2* a, unsigned long dim, double real, double imag )
{
    // Successive diagonal entries are dim + 1 elements apart.
    double2* p = a;
    for ( unsigned long k = 0; k < dim; ++k, p += dim + 1 )
    {
        p->x += real;
        p->y += imag;
    }
}
/*
* Input/Output:
*
** ug[M]
* ar[n][n]
* diag[n] ==>> I_diff[n]
** thickness
* dim -- n
* I_exp[n]
** column_index
*
* cache:
* a_[n][n] -- p2p3
* a^2_[n][n] -- s
* a^3_[n][n] -- s_
* P1[n][n]
* P2[n][n]
* P3[n][n]
*
* 1) compose A
* 2) scale to A_
* 3) compute A_^2 A_^3
* 4) compute (P1) (P2) (P3)
* 5) square back
* 6) extract one column
*/
__global__ void
make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double c, double * cuda_I_zigmoid, double* beams, double* kt_factor )
{
unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x;
if ( tilt_index >= tilt_size ) return;
unsigned long const dim = *(cuda_dim + tilt_index);
double* ug = cuda_ug;
unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim;
double* diag = cuda_diag + tilt_index * max_dim;
double* I_exp = cuda_I_exp + tilt_index * max_dim;
double* I_diff = cuda_I_diff + tilt_index * max_dim;
double* I_zigmoid = cuda_I_zigmoid + tilt_index * max_dim;
double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim;
unsigned long dimdim = dim*dim;
//cache should be of size 6*N^2
double2* a_ = cache;
double2* aa_ = a_ + dimdim;
double2* aaa_ = aa_ + dimdim;
double2* p1 = aaa_ + dimdim;
double2* p2 = p1 + dimdim;
double2* p3 = p2 + dimdim;
//reuse memory in latter steps, when a_, aa_ and aaa_ are idle
//double2* p2p3 = a_;
double2* p2p3 = aaa_;
double2* s = aa_;
double2* s_ = aaa_;
//1)
//kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim )) );
//cuda_assert( hipDeviceSynchronize() );
//device_compose_a( ug, ar, diag, thickness, a_, dim );
double const alpha = kt_factor[tilt_index*3];
double const beta = kt_factor[tilt_index*3+1];
double const gamma = kt_factor[tilt_index*3+2];
device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, gamma, beams );
//2)
//TODO
double* the_norm = (double*)aa_;
//kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) );
////kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) );
//cuda_assert( hipDeviceSynchronize() );
device_Dznrm2( dimdim, a_, the_norm );
//double const ratio = (*the_norm) * 53.71920351148152;
double const ratio = (*the_norm) / 5.371920351148152;
unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio));
unsigned long const scaling_factor = 1 << scaler;
double const scale = scaling_factor;
//kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale
//cuda_assert( hipDeviceSynchronize() );
device_Zscal( dimdim, 1.0/scale, a_ );
//3)
dim3 const mm_grids( (dim+15)/16, (dim+15)/16 );
dim3 const mm_threads( 16, 16 );
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aa_, a_, a_, dim, 1.0 )) );
cuda_assert( hipDeviceSynchronize() );
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aaa_, aa_, a_, dim, 1.0 )) );
cuda_assert( hipDeviceSynchronize() );
//4)
/*
* Maple:
* Digits := 25
* evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0))
* Returns:
* 2.697333461536989227389605+5.184162062649414177834087*I, //c1
* -.3810698456631129990312942+4.384644533145397950369203*I, //c2
* -2.110839800302654737498705+3.089910928725500922777702*I, //c3
* -3.038648072936697089212469+1.586801195758838328803868*I, //c4
* -3.333551485269048803294274, //c5
* -3.038648072936697089212469-1.586801195758838328803868*I, //c6
* -2.110839800302654737498705-3.089910928725500922777702*I, //c7
* -.3810698456631129990312942-4.384644533145397950369203*I, //c8
* 2.697333461536989227389605-5.184162062649414177834087*I //c9
*
* expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c )
* x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I
*
* expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c )
* x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x
*
* expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c )
* x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I
*
* expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9))
* 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7
*/
//4 - p1)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p1 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) );
//cuda_assert( hipDeviceSynchronize() );
device_sum_diag( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 );
//4 - p2)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p2 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, 32.01029973951970099352671, 0.0, p2, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) );
//cuda_assert( hipDeviceSynchronize() );
device_sum_diag( p2, dim, 39.17363072664900708597702, 0.0 );
//4 - p3)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p3 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) );
//cuda_assert( hipDeviceSynchronize() );
device_sum_diag( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 );
//4 - s)
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, p2p3, p2, p3, dim, 0.0016600397351866578333 )) );
cuda_assert( hipDeviceSynchronize() );
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s, p1, p2p3, dim, 0.0016600397351866578333 )) );
cuda_assert( hipDeviceSynchronize() );
//5)
if ( scaler != 0 )
{
for ( unsigned long index = 0; index != scaler; ++index )
{
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s_, s, s, dim, 1.0 )) );
cuda_assert( hipDeviceSynchronize() );
double2* tmp = s_;
s_ = s;
s = tmp;
}
}
//6)
double const ac_offset = cuda_ug[0];
double const dc_offset = cuda_ug[1];
//kernel_assert( (extract_intensity_diff_with_offset_zigmoid<<<1,dim>>>( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c )) );
//cuda_assert( hipDeviceSynchronize() );
device_extract_intensity_diff_with_offset_zigmoid( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c );
//exclude central pattern
I_diff[0] = 0.0;
I_zigmoid[0] = 0.0;
}
void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double c, double* cuda_I_zigmoid, double* beams, double* kt_factor )
{
//unsigned long const threads = 64;
//unsigned long const threads = 128;
unsigned long const threads = 256;
unsigned long const grids = (tilt_size + threads - 1)/threads;
kernel_assert( (hipLaunchKernelGGL(( make_individual_pattern_intensity_diff), dim3(grids), dim3(threads), 0, 0, cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, c, cuda_I_zigmoid, beams, kt_factor ) ) );
//cuda_assert( hipDeviceSynchronize() );
}
| 8b7c095afacc50dd86bfdd6cc311d465790ea630.cu | #include <f/device/device_assert/cuda_assert.hpp>
#include <f/device/device_assert/cublas_assert.hpp>
#include <f/device/device_assert/kernel_assert.hpp>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuComplex.h>
#include <math_functions.h>
__global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )// Dznrm2<<<1,128>>>(...)
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += re*re + im*im;
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = sqrt(x[0]);
}
__device__ void device_Dznrm2( unsigned long m, double2 *dA, double *dxnorm )
{
double ans = 0.0;
for ( unsigned long index = 0; index != m; ++index )
{
double const real = dA[index].x;
double const imag = dA[index].y;
ans += real*real + imag*imag;
}
dxnorm[0] = ans;
}
__global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += sqrt(re*re + im*im);
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = x[0];
}
//should call with Zscale<<<1, 128>>>(...);
__global__ void Zscal( unsigned long m, double real, double2* dA )
{
const int i = threadIdx.x;
for( unsigned long j = i; j < m; j += 128 )
{
dA[j].x *= real;
dA[j].y *= real;
}
}
__device__ void device_Zscal( unsigned long m, double real, double2* dA )
{
//for ( unsigned long index = 0; index != m; ++index ) <<-- WHY this one doesnot work????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????
for ( unsigned long index = 0; index < m; ++index )
{
dA[index].x *= real;
dA[index].y *= real;
}
}
//TODO: optimization
__global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>>
void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha )
{
typedef double value_type;
typedef double2 complex_type;
typedef unsigned long size_type;
__shared__ value_type _M[16][17];
__shared__ value_type _m[16][17];
__shared__ value_type _N[16][17];
__shared__ value_type _n[16][17];
const size_type bx = blockIdx.x;
const size_type by = blockIdx.y;
const size_type tx = threadIdx.x;
const size_type ty = threadIdx.y;
const size_type row = by * 16 + ty;
const size_type col = bx * 16 + tx;
const size_type iter_n = (dim+15)/16;
value_type R = 0.0;
value_type I = 0.0;
for ( size_type i = 0; i != iter_n; ++i )
{
if ( i * 16 + tx < dim && row < dim )
{
_M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x;
_m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y;
}
else
{
_M[ty][tx] = 0.0;
_m[ty][tx] = 0.0;
}
if ( i * 16 + ty < dim && col < dim )
{
_N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x;
_n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y;
}
else
{
_N[ty][tx] = 0.0;
_n[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
for ( size_type j = 0; j != 16; ++j )
{
R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx];
I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx];
}
__syncthreads();
}
if ( row < dim && col < dim )
{
(*( P + row * dim + col )).x = alpha * R;
(*( P + row * dim + col )).y = alpha * I;
}
}
__global__ void //<<<1,128>>>
Zcopy( unsigned long dims, double2* src, double2* dst )
{
unsigned long const i = threadIdx.x;
for( unsigned long j = i; j < dims; j += 128 )
{
(*(dst+j)).x = (*(src+j)).x;
(*(dst+j)).y = (*(src+j)).y;
}
}
__device__ void
device_Zcopy( unsigned long dims, double2* src, double2* dst )
{
for ( unsigned long index = 0; index < dims; ++index )
{
dst[index].x = src[index].x;
dst[index].y = src[index].y;
}
}
__global__ void//<<<1, 128>>>
Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
unsigned long const i = threadIdx.x;
double R = 0.0;
double I = 0.0;
for( unsigned long j = i; j < dims; j += 128 )
{
R = (*(src+j)).x;
I = (*(src+j)).y;
(*(dst+j)).x += real * R - imag * I;
(*(dst+j)).y += real * I + imag * R;
}
}
__device__ void//<<<1, 128>>>
device_Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
for ( unsigned long index = 0; index < dims; ++index )
{
double const R = src[index].x;
double const I = src[index].y;
dst[index].x = real * R - imag * I;
dst[index].y = real * I + imag * R;
}
}
#if 0
__global__ void
compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim )
{
int const row_index = threadIdx.x;
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) );
}
#endif
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double gamma, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) + kx*alpha + ky*beta + gamma ) );
}
}
#if 0
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness * ( *(diag+row_index) + kx*alpha + ky*beta ) );
}
}
#endif
//TODO: optimization
#if 0
Comment:
When working with original global kernel 'extract_intensity_diff_with_offset_zigmoid', the generated residuals( all kinds ) are
a little bit smaller(1.0e-6 order) than the new device routine 'device_extract_intensity_diff_with_offset_zigmoid'
#endif
__global__ void
extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
double const norm = cuCabs(*(s+S_offset));
double const val = *(I_exp+I_offset);
double const df = val - norm * norm * ac_offset - dc_offset;
*(I_diff+I_offset) = df;
*(I_zigmoid+I_offset) = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) );
}
__device__ void
device_extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
for ( unsigned long index = 0; index < dim; ++index )
{
unsigned long const I_offset = index;
unsigned long const S_offset = column_index + index * dim;
double const real = s[S_offset].x;
double const imag = s[S_offset].y;
double const norm = real*real + imag*imag;
double const val = I_exp[I_offset];
double const df = val - norm * ac_offset - dc_offset;
I_diff[I_offset] = df;
I_zigmoid[I_offset] = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) );
}
}
//TODO: optimization
__global__ void
sum_diag( double2* a, unsigned long dim, double real, double imag )
{
int const index = threadIdx.x;
int const offset = index * dim + index;
*(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag );
}
__device__ void
device_sum_diag( double2* a, unsigned long dim, double real, double imag )
{
for ( unsigned long index = 0; index < dim; ++index )
{
unsigned long const offset = index * dim + index;
a[offset].x += real;
a[offset].y += imag;
}
}
/*
* Input/Output:
*
** ug[M]
* ar[n][n]
* diag[n] ==>> I_diff[n]
** thickness
* dim -- n
* I_exp[n]
** column_index
*
* cache:
* a_[n][n] -- p2p3
* a^2_[n][n] -- s
* a^3_[n][n] -- s_
* P1[n][n]
* P2[n][n]
* P3[n][n]
*
* 1) compose A
* 2) scale to A_
* 3) compute A_^2 A_^3
* 4) compute (P1) (P2) (P3)
* 5) square back
* 6) extract one column
*/
__global__ void
make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double c, double * cuda_I_zigmoid, double* beams, double* kt_factor )
{
unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x;
if ( tilt_index >= tilt_size ) return;
unsigned long const dim = *(cuda_dim + tilt_index);
double* ug = cuda_ug;
unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim;
double* diag = cuda_diag + tilt_index * max_dim;
double* I_exp = cuda_I_exp + tilt_index * max_dim;
double* I_diff = cuda_I_diff + tilt_index * max_dim;
double* I_zigmoid = cuda_I_zigmoid + tilt_index * max_dim;
double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim;
unsigned long dimdim = dim*dim;
//cache should be of size 6*N^2
double2* a_ = cache;
double2* aa_ = a_ + dimdim;
double2* aaa_ = aa_ + dimdim;
double2* p1 = aaa_ + dimdim;
double2* p2 = p1 + dimdim;
double2* p3 = p2 + dimdim;
//reuse memory in latter steps, when a_, aa_ and aaa_ are idle
//double2* p2p3 = a_;
double2* p2p3 = aaa_;
double2* s = aa_;
double2* s_ = aaa_;
//1)
//kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim )) );
//cuda_assert( cudaDeviceSynchronize() );
//device_compose_a( ug, ar, diag, thickness, a_, dim );
double const alpha = kt_factor[tilt_index*3];
double const beta = kt_factor[tilt_index*3+1];
double const gamma = kt_factor[tilt_index*3+2];
device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, gamma, beams );
//2)
//TODO
double* the_norm = (double*)aa_;
//kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) );
////kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Dznrm2( dimdim, a_, the_norm );
//double const ratio = (*the_norm) * 53.71920351148152;
double const ratio = (*the_norm) / 5.371920351148152;
unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio));
unsigned long const scaling_factor = 1 << scaler;
double const scale = scaling_factor;
//kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale
//cuda_assert( cudaDeviceSynchronize() );
device_Zscal( dimdim, 1.0/scale, a_ );
//3)
dim3 const mm_grids( (dim+15)/16, (dim+15)/16 );
dim3 const mm_threads( 16, 16 );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aa_, a_, a_, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aaa_, aa_, a_, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
//4)
/*
* Maple:
* Digits := 25
* evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0))
* Returns:
* 2.697333461536989227389605+5.184162062649414177834087*I, //c1
* -.3810698456631129990312942+4.384644533145397950369203*I, //c2
* -2.110839800302654737498705+3.089910928725500922777702*I, //c3
* -3.038648072936697089212469+1.586801195758838328803868*I, //c4
* -3.333551485269048803294274, //c5
* -3.038648072936697089212469-1.586801195758838328803868*I, //c6
* -2.110839800302654737498705-3.089910928725500922777702*I, //c7
* -.3810698456631129990312942-4.384644533145397950369203*I, //c8
* 2.697333461536989227389605-5.184162062649414177834087*I //c9
*
* expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c )
* x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I
*
* expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c )
* x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x
*
* expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c )
* x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I
*
* expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9))
* 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7
*/
//4 - p1)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p1 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_sum_diag( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 );
//4 - p2)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p2 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, 32.01029973951970099352671, 0.0, p2, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_sum_diag( p2, dim, 39.17363072664900708597702, 0.0 );
//4 - p3)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p3 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_sum_diag( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 );
//4 - s)
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( p2p3, p2, p3, dim, 0.0016600397351866578333 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s, p1, p2p3, dim, 0.0016600397351866578333 )) );
cuda_assert( cudaDeviceSynchronize() );
//5)
if ( scaler != 0 )
{
for ( unsigned long index = 0; index != scaler; ++index )
{
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s_, s, s, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
double2* tmp = s_;
s_ = s;
s = tmp;
}
}
//6)
double const ac_offset = cuda_ug[0];
double const dc_offset = cuda_ug[1];
//kernel_assert( (extract_intensity_diff_with_offset_zigmoid<<<1,dim>>>( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c )) );
//cuda_assert( cudaDeviceSynchronize() );
device_extract_intensity_diff_with_offset_zigmoid( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c );
//exclude central pattern
I_diff[0] = 0.0;
I_zigmoid[0] = 0.0;
}
void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double c, double* cuda_I_zigmoid, double* beams, double* kt_factor )
{
//unsigned long const threads = 64;
//unsigned long const threads = 128;
unsigned long const threads = 256;
unsigned long const grids = (tilt_size + threads - 1)/threads;
kernel_assert( ( make_individual_pattern_intensity_diff<<<grids, threads>>>( cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, c, cuda_I_zigmoid, beams, kt_factor ) ) );
//cuda_assert( cudaDeviceSynchronize() );
}
|
abf52d9046b16f62e35e8606384f994713ce3103.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/customized/kernels/clip_by_value_kernel.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
namespace {
template<typename T, typename F>
__global__ void CudaClipForward(F clip_func, int64_t n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = clip_func(x[i]); }
}
template<typename T, typename F>
__global__ void CudaClipBackward(F clip_func, int64_t n, const T* x, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = clip_func(x[i], dy[i]); }
}
} // namespace
template<typename T>
struct ClipKernelUtil<DeviceType::kGPU, T> {
template<typename F>
static void Forward(DeviceCtx* ctx, F clip_func, const int64_t n, const T* x, T* y) {
RUN_CUDA_KERNEL((CudaClipForward<T, F>), ctx, n, clip_func, n, x, y);
}
template<typename F>
static void Backward(DeviceCtx* ctx, F clip_func, const int64_t n, const T* x, const T* dy,
T* dx) {
RUN_CUDA_KERNEL((CudaClipBackward<T, F>), ctx, n, clip_func, n, x, dy, dx);
}
};
#define INITIATE_CLIP_KERNEL_UTIL_GPU(dtype, dtype_v) \
template struct ClipKernelUtil<DeviceType::kGPU, dtype>; \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Forward( \
DeviceCtx*, ClipByMinFunctor<dtype>, const int64_t n, const dtype*, dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Forward( \
DeviceCtx*, ClipByMaxFunctor<dtype>, const int64_t n, const dtype*, dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Forward( \
DeviceCtx*, ClipByMinMaxFunctor<dtype>, const int64_t n, const dtype*, dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Backward( \
DeviceCtx*, ClipByMinGradFunctor<dtype>, const int64_t n, const dtype*, const dtype*, \
dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Backward( \
DeviceCtx*, ClipByMaxGradFunctor<dtype>, const int64_t n, const dtype*, const dtype*, \
dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Backward( \
DeviceCtx*, ClipByMinMaxGradFunctor<dtype>, const int64_t n, const dtype*, const dtype*, \
dtype*);
OF_PP_FOR_EACH_TUPLE(INITIATE_CLIP_KERNEL_UTIL_GPU, ARITHMETIC_DATA_TYPE_SEQ)
} // namespace oneflow
| abf52d9046b16f62e35e8606384f994713ce3103.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/customized/kernels/clip_by_value_kernel.h"
#include "oneflow/core/device/cuda_util.h"
namespace oneflow {
namespace {
template<typename T, typename F>
__global__ void CudaClipForward(F clip_func, int64_t n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = clip_func(x[i]); }
}
template<typename T, typename F>
__global__ void CudaClipBackward(F clip_func, int64_t n, const T* x, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = clip_func(x[i], dy[i]); }
}
} // namespace
template<typename T>
struct ClipKernelUtil<DeviceType::kGPU, T> {
template<typename F>
static void Forward(DeviceCtx* ctx, F clip_func, const int64_t n, const T* x, T* y) {
RUN_CUDA_KERNEL((CudaClipForward<T, F>), ctx, n, clip_func, n, x, y);
}
template<typename F>
static void Backward(DeviceCtx* ctx, F clip_func, const int64_t n, const T* x, const T* dy,
T* dx) {
RUN_CUDA_KERNEL((CudaClipBackward<T, F>), ctx, n, clip_func, n, x, dy, dx);
}
};
#define INITIATE_CLIP_KERNEL_UTIL_GPU(dtype, dtype_v) \
template struct ClipKernelUtil<DeviceType::kGPU, dtype>; \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Forward( \
DeviceCtx*, ClipByMinFunctor<dtype>, const int64_t n, const dtype*, dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Forward( \
DeviceCtx*, ClipByMaxFunctor<dtype>, const int64_t n, const dtype*, dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Forward( \
DeviceCtx*, ClipByMinMaxFunctor<dtype>, const int64_t n, const dtype*, dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Backward( \
DeviceCtx*, ClipByMinGradFunctor<dtype>, const int64_t n, const dtype*, const dtype*, \
dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Backward( \
DeviceCtx*, ClipByMaxGradFunctor<dtype>, const int64_t n, const dtype*, const dtype*, \
dtype*); \
template void ClipKernelUtil<DeviceType::kGPU, dtype>::Backward( \
DeviceCtx*, ClipByMinMaxGradFunctor<dtype>, const int64_t n, const dtype*, const dtype*, \
dtype*);
OF_PP_FOR_EACH_TUPLE(INITIATE_CLIP_KERNEL_UTIL_GPU, ARITHMETIC_DATA_TYPE_SEQ)
} // namespace oneflow
|
2f6d76d313c0cef5f3811846b6efd51f7ede80db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2017-2023 by XGBoost Contributors
*/
#include <GPUTreeShap/gpu_treeshap.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <any> // for any, any_cast
#include <memory>
#include "../collective/communicator-inl.cuh"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/common.h"
#include "../common/cuda_context.cuh" // for CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/error_msg.h" // for InplacePredictProxy
#include "../data/device_adapter.cuh"
#include "../data/ellpack_page.cuh"
#include "../data/proxy_dmatrix.h"
#include "../gbm/gbtree_model.h"
#include "predict_fn.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
namespace xgboost::predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/**
 * Device-side view of a single tree inside the flattened multi-tree arrays held by
 * DeviceModel. Slices the concatenated node / split-type / category arrays down to
 * the segment belonging to `tree_idx`.
 */
struct TreeView {
  // Categorical split metadata (split types, category bitsets, per-node segments)
  // restricted to this tree.
  RegTree::CategoricalSplitMatrix cats;
  // Nodes of this tree only.
  common::Span<RegTree::Node const> d_tree;
  XGBOOST_DEVICE
  TreeView(size_t tree_begin, size_t tree_idx, common::Span<const RegTree::Node> d_nodes,
           common::Span<size_t const> d_tree_segments,
           common::Span<FeatureType const> d_tree_split_types,
           common::Span<uint32_t const> d_cat_tree_segments,
           common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
           common::Span<uint32_t const> d_categories) {
    // d_tree_segments is indexed relative to tree_begin; segment [i, i+1) gives the
    // node range of the i-th tree in the batch.
    auto begin = d_tree_segments[tree_idx - tree_begin];
    auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
                   d_tree_segments[tree_idx - tree_begin];
    d_tree = d_nodes.subspan(begin, n_nodes);
    // Per-node category segments and split types share the node indexing.
    auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
    auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
    // The category value array has its own per-tree segmentation.
    auto tree_categories =
        d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
                             d_cat_tree_segments[tree_idx - tree_begin + 1] -
                             d_cat_tree_segments[tree_idx - tree_begin]);
    cats.split_type = tree_split_types;
    cats.categories = tree_categories;
    cats.node_ptr = tree_cat_ptrs;
  }
  // True when this tree contains at least one categorical split.
  __device__ bool HasCategoricalSplit() const {
    return !cats.categories.empty();
  }
};
/**
 * Device-side CSR view over a SparsePage: entry data, row pointers, and the
 * (dense) feature count. Provides per-element access with a dense fast path
 * and a binary search for sparse rows.
 */
struct SparsePageView {
  common::Span<const Entry> d_data;      // concatenated (feature, value) entries
  common::Span<const bst_row_t> d_row_ptr;  // CSR row offsets, size NumRows()+1
  bst_feature_t num_features;

  SparsePageView() = default;
  XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
                                common::Span<const bst_row_t> row_ptr,
                                bst_feature_t num_features)
      : d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}

  /**
   * Return the value of feature `fidx` in row `ridx`, or NaN when missing.
   */
  __device__ float GetElement(size_t ridx, size_t fidx) const {
    // Binary search
    auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
    auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
    if (end_ptr - begin_ptr == this->NumCols()) {
      // Bypass span check for dense data
      return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
    }
    // Fix: initialize to end_ptr instead of leaving it uninitialized. The first
    // comparison below previously read an uninitialized iterator (UB). end_ptr
    // can never equal a computed midpoint (middle < end_ptr whenever the range
    // is non-empty), so this preserves the search behavior exactly.
    common::Span<const Entry>::iterator previous_middle = end_ptr;
    while (end_ptr != begin_ptr) {
      auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
      if (middle == previous_middle) {
        // Range no longer shrinking; feature not present in this row.
        break;
      } else {
        previous_middle = middle;
      }
      if (middle->index == fidx) {
        return middle->fvalue;
      } else if (middle->index < fidx) {
        begin_ptr = middle;
      } else {
        end_ptr = middle;
      }
    }
    // Value is missing
    return nanf("");
  }
  XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
  XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
/**
 * Loader that optionally stages one row per thread into dynamic shared memory.
 *
 * NOTE: the constructor contains __syncthreads() when use_shared is true, so it
 * must be executed by every thread in the block — construct it before any
 * divergent early return in the calling kernel.
 */
struct SparsePageLoader {
  bool use_shared;        // whether rows were staged into shared memory
  SparsePageView data;    // fallback view used when shared memory is not used
  float* smem;            // dynamic shared memory, blockDim.x * num_features floats
  __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
                              bst_row_t num_rows, size_t entry_start, float)
      : use_shared(use_shared),
        data(data) {
    extern __shared__ float _smem[];
    smem = _smem;
    // Copy instances
    if (use_shared) {
      bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      int shared_elements = blockDim.x * data.num_features;
      // Initialize the whole tile to NaN so absent features read as missing.
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        bst_uint elem_begin = data.d_row_ptr[global_idx];
        bst_uint elem_end = data.d_row_ptr[global_idx + 1];
        for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
          Entry elem = data.d_data[elem_idx - entry_start];
          // Each thread owns a contiguous num_features slice of the tile.
          smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
        }
      }
      __syncthreads();
    }
  }
  // Read feature fidx of row ridx; shared-memory path assumes ridx is the row
  // staged by this thread during construction.
  __device__ float GetElement(size_t ridx, size_t fidx) const {
    if (use_shared) {
      return smem[threadIdx.x * data.num_features + fidx];
    } else {
      return data.GetElement(ridx, fidx);
    }
  }
};
/**
 * Loader over a quantized EllpackDeviceAccessor. Reconstructs a representative
 * float value for each (row, feature) bin; never uses shared memory (the extra
 * constructor parameters exist only to match the loader interface).
 */
struct EllpackLoader {
  EllpackDeviceAccessor const& matrix;
  XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool, bst_feature_t, bst_row_t,
                               size_t, float)
      : matrix{m} {}
  __device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
    auto gidx = matrix.GetBinIndex(ridx, fidx);
    // -1 indicates a missing value in the ellpack encoding.
    if (gidx == -1) {
      return nan("");
    }
    // Categorical features store the category value directly in the cut map.
    if (common::IsCat(matrix.feature_types, fidx)) {
      return matrix.gidx_fvalue_map[gidx];
    }
    // The gradient index needs to be shifted by one as min values are not included in the
    // cuts.
    if (gidx == matrix.feature_segments[fidx]) {
      return matrix.min_fvalue[fidx];
    }
    return matrix.gidx_fvalue_map[gidx - 1];
  }
};
/**
 * Loader over a device data adapter batch (for in-place prediction). The batch is
 * assumed addressable as a dense row-major matrix of `columns` features; values
 * failing `is_valid` (i.e. equal to `missing`) read as NaN.
 *
 * NOTE: the constructor executes __syncthreads(), so all threads of a block must
 * reach it before any divergent return in the caller.
 */
template <typename Batch>
struct DeviceAdapterLoader {
  Batch batch;
  bst_feature_t columns;          // number of features per row
  float* smem;                    // dynamic shared memory tile
  bool use_shared;                // whether rows were staged into shared memory
  data::IsValidFunctor is_valid;  // filters out the caller-specified missing value
  using BatchT = Batch;
  XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
                                         bst_feature_t num_features, bst_row_t num_rows,
                                         size_t entry_start, float missing) :
    batch{batch},
    columns{num_features},
    use_shared{use_shared},
    is_valid{missing} {
    extern __shared__ float _smem[];
    smem = _smem;
    if (use_shared) {
      uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      size_t shared_elements = blockDim.x * num_features;
      // Pre-fill with NaN so invalid/missing entries read back as missing.
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        // Stage this thread's row [beg, end) into its slice of the tile.
        auto beg = global_idx * columns;
        auto end = (global_idx + 1) * columns;
        for (size_t i = beg; i < end; ++i) {
          auto value = batch.GetElement(i).value;
          if (is_valid(value)) {
            smem[threadIdx.x * num_features + (i - beg)] = value;
          }
        }
      }
    }
    // Unconditional barrier keeps the block converged regardless of use_shared.
    __syncthreads();
  }
  // Read feature fidx of row ridx; returns NaN for missing values.
  XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
    if (use_shared) {
      return smem[threadIdx.x * columns + fidx];
    }
    auto value = batch.GetElement(ridx * columns + fidx).value;
    if (is_valid(value)) {
      return value;
    } else {
      return nan("");
    }
  }
};
/**
 * Walk `tree` from the root for row `ridx`, using `loader` to fetch feature
 * values, until a leaf is reached. Returns the leaf's node index.
 * Template flags select compile-time specializations of GetNextNode for
 * missing-value handling and categorical splits.
 */
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
                                   Loader *loader) {
  bst_node_t nidx = 0;
  RegTree::Node n = tree.d_tree[nidx];
  while (!n.IsLeaf()) {
    float fvalue = loader->GetElement(ridx, n.SplitIndex());
    bool is_missing = common::CheckNAN(fvalue);
    nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
                                                     is_missing, tree.cats);
    n = tree.d_tree[nidx];
  }
  return nidx;
}
/**
 * Return the leaf value of `tree` for row `ridx`. Dispatches to the
 * categorical or numerical specialization of GetLeafIndex depending on
 * whether the tree contains categorical splits.
 */
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
                               Loader *loader) {
  bst_node_t nidx = -1;
  if (tree.HasCategoricalSplit()) {
    nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
  } else {
    nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
  }
  return tree.d_tree[nidx].LeafValue();
}
/**
 * For each row, record the leaf index reached in every tree of
 * [tree_begin, tree_end). Output layout is row-major:
 * d_out_predictions[ridx * num_trees + tree_offset].
 *
 * Launch: 1-D grid, one thread per row; shared memory sized for the Loader
 * when use_shared is true.
 */
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
                  common::Span<float> d_out_predictions,
                  common::Span<size_t const> d_tree_segments,
                  common::Span<FeatureType const> d_tree_split_types,
                  common::Span<uint32_t const> d_cat_tree_segments,
                  common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
                  common::Span<uint32_t const> d_categories,
                  size_t tree_begin, size_t tree_end, size_t num_features,
                  size_t num_rows, size_t entry_start, bool use_shared,
                  float missing) {
  bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
  if (ridx >= num_rows) {
    return;
  }
  Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
  for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
    TreeView d_tree{
        tree_begin,          tree_idx,           d_nodes,
        d_tree_segments,     d_tree_split_types, d_cat_tree_segments,
        d_cat_node_segments, d_categories};
    bst_node_t leaf = -1;
    if (d_tree.HasCategoricalSplit()) {
      leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
    } else {
      leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
    }
    // Fix: index the output column by the tree's offset within the batch
    // (tree_idx - tree_begin), not the absolute tree_idx. The original
    // expression wrote past the row's slot whenever tree_begin > 0; the two
    // are identical when tree_begin == 0.
    d_out_predictions[ridx * (tree_end - tree_begin) + (tree_idx - tree_begin)] = leaf;
  }
}
/**
 * Accumulate margin predictions for rows of `data` over trees
 * [tree_begin, tree_end). Single-group models sum into one output per row;
 * multi-group models scatter each tree's contribution into
 * d_out_predictions[row * num_group + group].
 *
 * Launch: 1-D grid, one thread per row. The loader is constructed BEFORE the
 * bounds check because loader constructors call __syncthreads() when shared
 * memory is in use — all threads of the block must reach the barrier.
 */
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
              common::Span<float> d_out_predictions,
              common::Span<size_t const> d_tree_segments,
              common::Span<int const> d_tree_group,
              common::Span<FeatureType const> d_tree_split_types,
              common::Span<uint32_t const> d_cat_tree_segments,
              common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
              common::Span<uint32_t const> d_categories, size_t tree_begin,
              size_t tree_end, size_t num_features, size_t num_rows,
              size_t entry_start, bool use_shared, int num_group, float missing) {
  bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
  Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
  if (global_idx >= num_rows) return;
  if (num_group == 1) {
    // Single output group: accumulate all trees into one value per row.
    float sum = 0;
    for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      TreeView d_tree{
          tree_begin,          tree_idx,           d_nodes,
          d_tree_segments,     d_tree_split_types, d_cat_tree_segments,
          d_cat_node_segments, d_categories};
      float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
      sum += leaf;
    }
    d_out_predictions[global_idx] += sum;
  } else {
    // Multi-class / multi-target: each tree contributes to its own group slot.
    for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      int tree_group = d_tree_group[tree_idx];
      TreeView d_tree{
          tree_begin,          tree_idx,           d_nodes,
          d_tree_segments,     d_tree_split_types, d_cat_tree_segments,
          d_cat_node_segments, d_categories};
      bst_uint out_prediction_idx = global_idx * num_group + tree_group;
      d_out_predictions[out_prediction_idx] +=
          GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
    }
  }
}
/**
 * Flattened, device-resident copy of a (sub)range of trees from a GBTreeModel.
 * All trees' nodes, stats, split types, and categorical-split data are
 * concatenated into single arrays, with segment vectors recording per-tree
 * (and per-node, for categories) boundaries.
 */
class DeviceModel {
 public:
  // Need to lazily construct the vectors because GPU id is only known at runtime
  HostDeviceVector<RTreeNodeStat> stats;
  HostDeviceVector<size_t> tree_segments;
  HostDeviceVector<RegTree::Node> nodes;
  HostDeviceVector<int> tree_group;
  HostDeviceVector<FeatureType> split_types;

  // Pointer to each tree, segmenting the node array.
  HostDeviceVector<uint32_t> categories_tree_segments;

  // Pointer to each node, segmenting categories array.
  HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment> categories_node_segments;
  HostDeviceVector<uint32_t> categories;

  size_t tree_beg_;  // NOLINT
  size_t tree_end_;  // NOLINT
  int num_group;

  // Copy trees [tree_begin, tree_end) of `model` onto device `gpu_id`.
  void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
    dh::safe_cuda(hipSetDevice(gpu_id));

    // Copy decision trees to device
    tree_segments = HostDeviceVector<size_t>({}, gpu_id);
    auto& h_tree_segments = tree_segments.HostVector();
    h_tree_segments.reserve((tree_end - tree_begin) + 1);
    size_t sum = 0;
    // Prefix-sum of node counts: segment i holds tree i's node offset.
    h_tree_segments.push_back(sum);
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      sum += model.trees.at(tree_idx)->GetNodes().size();
      h_tree_segments.push_back(sum);
    }

    nodes = HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id);
    stats = HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(), RTreeNodeStat(), gpu_id);
    auto d_nodes = nodes.DevicePointer();
    auto d_stats = stats.DevicePointer();
    // Copy each tree's nodes/stats into its segment of the flat device arrays.
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
      auto& src_stats = model.trees.at(tree_idx)->GetStats();
      dh::safe_cuda(hipMemcpyAsync(
          d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
          sizeof(RegTree::Node) * src_nodes.size(), hipMemcpyDefault));
      dh::safe_cuda(hipMemcpyAsync(
          d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
          sizeof(RTreeNodeStat) * src_stats.size(), hipMemcpyDefault));
    }

    tree_group = HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id);
    auto& h_tree_group = tree_group.HostVector();
    std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());

    // Initialize categorical splits.
    split_types.SetDevice(gpu_id);
    std::vector<FeatureType>& h_split_types = split_types.HostVector();
    h_split_types.resize(h_tree_segments.back());
    for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
      auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
      std::copy(src_st.cbegin(), src_st.cend(),
                h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
    }

    categories = HostDeviceVector<uint32_t>({}, gpu_id);
    categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
    std::vector<uint32_t> &h_categories = categories.HostVector();
    std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
    // Concatenate category bitset words; record the running end offset per tree.
    for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
      auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
      size_t orig_size = h_categories.size();
      h_categories.resize(orig_size + src_cats.size());
      std::copy(src_cats.cbegin(), src_cats.cend(),
                h_categories.begin() + orig_size);
      h_split_cat_segments.push_back(h_categories.size());
    }

    categories_node_segments = HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>(
        h_tree_segments.back(), {}, gpu_id);
    std::vector<RegTree::CategoricalSplitMatrix::Segment>& h_categories_node_segments =
        categories_node_segments.HostVector();
    for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
      auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
      std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
                h_categories_node_segments.begin() +
                h_tree_segments[tree_idx - tree_begin]);
    }

    this->tree_beg_ = tree_begin;
    this->tree_end_ = tree_end;
    this->num_group = model.learner_model_param->OutputLength();
  }
};
/**
 * Split condition used by GPUTreeShap path elements. A condition is either a
 * numeric interval [lower, upper) or a category bitset, plus a flag saying
 * whether missing values follow this path.
 */
struct ShapSplitCondition {
  ShapSplitCondition() = default;
  // Fix: mem-initializer list reordered to match member declaration order
  // (lower, upper, categories, is_missing_branch). Members are initialized in
  // declaration order regardless of list order, so this only silences
  // -Wreorder; behavior is unchanged.
  XGBOOST_DEVICE
  ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
                     bool is_missing_branch, common::CatBitField cats)
      : feature_lower_bound(feature_lower_bound),
        feature_upper_bound(feature_upper_bound),
        categories{std::move(cats)},
        is_missing_branch(is_missing_branch) {
    assert(feature_lower_bound <= feature_upper_bound);
  }

  /*! Feature values >= lower and < upper flow down this path. */
  float feature_lower_bound;
  float feature_upper_bound;
  /*! Feature value set to true flow down this path. */
  common::CatBitField categories;
  /*! Do missing values flow down this path? */
  bool is_missing_branch;

  // Does this instance flow down this path?
  [[nodiscard]] XGBOOST_DEVICE bool EvaluateSplit(float x) const {
    // is nan
    if (isnan(x)) {
      return is_missing_branch;
    }
    if (categories.Capacity() != 0) {
      auto cat = static_cast<uint32_t>(x);
      return categories.Check(cat);
    } else {
      return x >= feature_lower_bound && x < feature_upper_bound;
    }
  }

  // the &= op in bitfiled is per cuda thread, this one loops over the entire
  // bitfield.
  XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
                                                      common::CatBitField r) {
    if (l.Data() == r.Data()) {
      return l;
    }
    // Intersect into the smaller bitfield so iteration stays in bounds.
    if (l.Capacity() > r.Capacity()) {
      thrust::swap(l, r);
    }
    for (size_t i = 0; i < r.Bits().size(); ++i) {
      l.Bits()[i] &= r.Bits()[i];
    }
    return l;
  }

  // Combine two split conditions on the same feature
  XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
    // Combine duplicate features
    if (categories.Capacity() != 0 || other.categories.Capacity() != 0) {
      categories = Intersect(categories, other.categories);
    } else {
      // Numeric conditions combine by interval intersection.
      feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
      feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
    }
    is_missing_branch = is_missing_branch && other.is_missing_branch;
  }
};
// Per-leaf metadata gathered while enumerating GPUTreeShap paths.
struct PathInfo {
  int64_t leaf_position;  // -1 not a leaf
  size_t length;          // number of nodes on the root-to-leaf path
  size_t tree_idx;        // index of the tree containing this leaf
};
// Transform model into path element form for GPUTreeShap
// Transform model into path element form for GPUTreeShap
//
// For every leaf in the device model, emits the root-to-leaf path as a
// sequence of gpu_treeshap::PathElement<ShapSplitCondition>, written in
// root-first order into *paths. Category bitsets referenced by the path
// elements are stored in *path_categories.
void ExtractPaths(
    dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
    DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
    int gpu_id) {
  dh::safe_cuda(hipSetDevice(gpu_id));
  auto& device_model = *model;

  dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
  dh::XGBCachingDeviceAllocator<PathInfo> alloc;
  auto d_nodes = device_model.nodes.ConstDeviceSpan();
  auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
  // For each node, compute (leaf index, path length, tree index); non-leaves
  // and deleted nodes are marked with leaf_position == -1 and filtered below.
  auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
      thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
        auto n = d_nodes[idx];
        if (!n.IsLeaf() || n.IsDeleted()) {
          return PathInfo{-1, 0, 0};
        }
        size_t tree_idx =
            dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
        size_t tree_offset = d_tree_segments[tree_idx];
        // Walk up to the root to measure the path length.
        size_t path_length = 1;
        while (!n.IsRoot()) {
          n = d_nodes[n.Parent() + tree_offset];
          path_length++;
        }
        return PathInfo{static_cast<int64_t>(idx), path_length, tree_idx};
      });
  auto end = thrust::copy_if(
      thrust::hip::par(alloc), nodes_transform,
      nodes_transform + d_nodes.size(), info.begin(),
      [=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
  info.resize(end - info.begin());

  // Exclusive scan of path lengths gives each path's output offset; the final
  // element (hence the +1 range) is the total number of path elements.
  auto length_iterator = dh::MakeTransformIterator<size_t>(
      info.begin(),
      [=] __device__(const PathInfo& info) { return info.length; });
  dh::caching_device_vector<size_t> path_segments(info.size() + 1);
  thrust::exclusive_scan(thrust::hip::par(alloc), length_iterator,
                         length_iterator + info.size() + 1,
                         path_segments.begin());

  paths->resize(path_segments.back());

  auto d_paths = dh::ToSpan(*paths);
  auto d_info = info.data().get();
  auto d_stats = device_model.stats.ConstDeviceSpan();
  auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
  auto d_path_segments = path_segments.data().get();

  auto d_split_types = device_model.split_types.ConstDeviceSpan();
  auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
  auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();

  // When any split is categorical, size the scratch category buffer by the
  // largest per-node category segment so every path element gets a fixed-size
  // slice.
  size_t max_cat = 0;
  if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
                     common::IsCatOp{})) {
    dh::PinnedMemory pinned;
    auto h_max_cat = pinned.GetSpan<RegTree::CategoricalSplitMatrix::Segment>(1);
    auto max_elem_it = dh::MakeTransformIterator<size_t>(
        dh::tbegin(d_cat_node_segments),
        [] __device__(RegTree::CategoricalSplitMatrix::Segment seg) { return seg.size; });
    size_t max_cat_it =
        thrust::max_element(thrust::device, max_elem_it,
                            max_elem_it + d_cat_node_segments.size()) -
        max_elem_it;
    dh::safe_cuda(hipMemcpy(h_max_cat.data(),
                            d_cat_node_segments.data() + max_cat_it,
                            h_max_cat.size_bytes(), hipMemcpyDeviceToHost));
    max_cat = h_max_cat[0].size;
    CHECK_GE(max_cat, 1);
    path_categories->resize(max_cat * paths->size());
  }

  auto d_model_categories = device_model.categories.DeviceSpan();
  common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);

  // One thread per leaf: walk from the leaf to the root, filling path elements
  // back-to-front (output_position counts down from the segment end).
  dh::LaunchN(info.size(), [=] __device__(size_t idx) {
    auto path_info = d_info[idx];
    size_t tree_offset = d_tree_segments[path_info.tree_idx];
    TreeView tree{0,                   path_info.tree_idx, d_nodes,
                  d_tree_segments,     d_split_types,      d_cat_segments,
                  d_cat_node_segments, d_model_categories};
    int group = d_tree_group[path_info.tree_idx];
    size_t child_idx = path_info.leaf_position;
    auto child = d_nodes[child_idx];
    float v = child.LeafValue();
    const float inf = std::numeric_limits<float>::infinity();
    size_t output_position = d_path_segments[idx + 1] - 1;
    while (!child.IsRoot()) {
      size_t parent_idx = tree_offset + child.Parent();
      // zero_fraction = cover(child) / cover(parent): probability of taking
      // this branch when the feature is "unseen" (GPUTreeShap semantics).
      double child_cover = d_stats[child_idx].sum_hess;
      double parent_cover = d_stats[parent_idx].sum_hess;
      double zero_fraction = child_cover / parent_cover;
      auto parent = tree.d_tree[child.Parent()];

      bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
      bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
                             (parent.DefaultLeft() && is_left_path);

      float lower_bound = -inf;
      float upper_bound = inf;
      common::CatBitField bits;
      if (common::IsCat(tree.cats.split_type, child.Parent())) {
        // Left branch means "category NOT in the set", so flip the bits.
        auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
        size_t size = tree.cats.node_ptr[child.Parent()].size;
        auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
        SPAN_CHECK(path_cats.size() >= node_cats.size());
        for (size_t i = 0; i < node_cats.size(); ++i) {
          path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
        }
        bits = common::CatBitField{path_cats};
      } else {
        lower_bound = is_left_path ? -inf : parent.SplitCond();
        upper_bound = is_left_path ? parent.SplitCond() : inf;
      }
      d_paths[output_position--] =
          gpu_treeshap::PathElement<ShapSplitCondition>{
              idx,           parent.SplitIndex(),
              group,         ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
              zero_fraction, v};
      child_idx = parent_idx;
      child = parent;
    }
    // Root node has feature -1
    d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
  });
}
namespace {
/**
 * Compute the dynamic shared-memory size (bytes) needed to stage one row of
 * `cols` float features for each of kBlockThreads threads. Returns 0 when the
 * requirement exceeds `max_shared_memory_bytes`, signalling callers to fall
 * back to global-memory access.
 */
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // A zero limit is never expected; guard against misconfigured devices.
  CHECK_GT(max_shared_memory_bytes, 0);
  // One float per feature per thread in the block.
  size_t const required = sizeof(float) * cols * kBlockThreads;
  // Too large for the device limit -> request no shared memory at all.
  return required > max_shared_memory_bytes ? static_cast<size_t>(0) : required;
}
using BitVector = LBitField64;
/**
 * Column-split phase 1: for every (row, internal node) pair, record this
 * worker's local split decision in `decision_bits` and whether the feature
 * value was missing in `missing_bits`. Bit index layout:
 * row * num_nodes + node_offset_within_all_trees. The bit vectors are later
 * all-reduced across workers before traversal.
 *
 * Launch: 1-D grid, one thread per row; shared memory per SparsePageLoader.
 */
__global__ void MaskBitVectorKernel(
    SparsePageView data, common::Span<RegTree::Node const> d_nodes,
    common::Span<std::size_t const> d_tree_segments, common::Span<int const> d_tree_group,
    common::Span<FeatureType const> d_tree_split_types,
    common::Span<std::uint32_t const> d_cat_tree_segments,
    common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
    common::Span<std::uint32_t const> d_categories, BitVector decision_bits, BitVector missing_bits,
    std::size_t tree_begin, std::size_t tree_end, std::size_t num_features, std::size_t num_rows,
    std::size_t entry_start, std::size_t num_nodes, bool use_shared, float missing) {
  auto const row_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (row_idx >= num_rows) {
    return;
  }
  SparsePageLoader loader(data, use_shared, num_features, num_rows, entry_start, missing);
  std::size_t tree_offset = 0;
  for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
    TreeView d_tree{tree_begin,          tree_idx,           d_nodes,
                    d_tree_segments,     d_tree_split_types, d_cat_tree_segments,
                    d_cat_node_segments, d_categories};
    auto const tree_nodes = d_tree.d_tree.size();
    // Fix: use std::size_t for the node index; `auto nid = 0` deduced int and
    // compared signed-vs-unsigned against the size_t node count.
    for (std::size_t nid = 0; nid < tree_nodes; nid++) {
      auto const& node = d_tree.d_tree[nid];
      if (node.IsDeleted() || node.IsLeaf()) {
        continue;
      }
      auto const fvalue = loader.GetElement(row_idx, node.SplitIndex());
      auto const is_missing = common::CheckNAN(fvalue);
      auto const bit_index = row_idx * num_nodes + tree_offset + nid;
      if (is_missing) {
        missing_bits.Set(bit_index);
      } else {
        // Decision bit set => go left; evaluated locally with this worker's
        // feature columns.
        auto const decision = d_tree.HasCategoricalSplit()
                                  ? GetDecision<true>(node, nid, fvalue, d_tree.cats)
                                  : GetDecision<false>(node, nid, fvalue, d_tree.cats);
        if (decision) {
          decision_bits.Set(bit_index);
        }
      }
    }
    tree_offset += tree_nodes;
  }
}
/**
 * Column-split phase 2 helper: traverse `tree` for row `ridx` using the
 * all-reduced decision/missing bit vectors instead of feature values, and
 * return the leaf value. Bit layout matches MaskBitVectorKernel:
 * ridx * num_nodes + tree_offset + nidx.
 */
__device__ float GetLeafWeightByBitVector(bst_row_t ridx, TreeView const& tree,
                                          BitVector const& decision_bits,
                                          BitVector const& missing_bits, std::size_t num_nodes,
                                          std::size_t tree_offset) {
  bst_node_t nidx = 0;
  RegTree::Node n = tree.d_tree[nidx];
  while (!n.IsLeaf()) {
    auto const bit_index = ridx * num_nodes + tree_offset + nidx;
    if (missing_bits.Check(bit_index)) {
      // Value missing on every worker: follow the default direction.
      nidx = n.DefaultChild();
    } else {
      // Decision bit set => left child; otherwise right (left + 1).
      nidx = n.LeftChild() + !decision_bits.Check(bit_index);
    }
    n = tree.d_tree[nidx];
  }
  return tree.d_tree[nidx].LeafValue();
}
/**
 * Column-split phase 2: accumulate predictions by traversing each tree with
 * the all-reduced decision/missing bit vectors. Mirrors PredictKernel's
 * single-group vs multi-group output layout.
 *
 * Launch: 1-D grid, one thread per row; no shared memory required.
 */
__global__ void PredictByBitVectorKernel(
    common::Span<RegTree::Node const> d_nodes, common::Span<float> d_out_predictions,
    common::Span<std::size_t const> d_tree_segments, common::Span<int const> d_tree_group,
    common::Span<FeatureType const> d_tree_split_types,
    common::Span<std::uint32_t const> d_cat_tree_segments,
    common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
    common::Span<std::uint32_t const> d_categories, BitVector decision_bits, BitVector missing_bits,
    std::size_t tree_begin, std::size_t tree_end, std::size_t num_rows, std::size_t num_nodes,
    std::uint32_t num_group) {
  auto const row_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (row_idx >= num_rows) {
    return;
  }
  // Running node offset of the current tree within the concatenated bit layout.
  std::size_t tree_offset = 0;
  if (num_group == 1) {
    float sum = 0;
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      TreeView d_tree{tree_begin,          tree_idx,           d_nodes,
                      d_tree_segments,     d_tree_split_types, d_cat_tree_segments,
                      d_cat_node_segments, d_categories};
      sum += GetLeafWeightByBitVector(row_idx, d_tree, decision_bits, missing_bits, num_nodes,
                                      tree_offset);
      tree_offset += d_tree.d_tree.size();
    }
    d_out_predictions[row_idx] += sum;
  } else {
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      auto const tree_group = d_tree_group[tree_idx];
      TreeView d_tree{tree_begin,          tree_idx,           d_nodes,
                      d_tree_segments,     d_tree_split_types, d_cat_tree_segments,
                      d_cat_node_segments, d_categories};
      bst_uint out_prediction_idx = row_idx * num_group + tree_group;
      d_out_predictions[out_prediction_idx] += GetLeafWeightByBitVector(
          row_idx, d_tree, decision_bits, missing_bits, num_nodes, tree_offset);
      tree_offset += d_tree.d_tree.size();
    }
  }
}
/**
 * Prediction path for column-wise (vertical) data splits: each worker holds a
 * subset of feature columns, so split decisions are computed locally as bit
 * vectors, combined across workers with collective bitwise reductions, and
 * only then used to traverse the trees.
 */
class ColumnSplitHelper {
 public:
  explicit ColumnSplitHelper(Context const* ctx) : ctx_{ctx} {}

  void PredictBatch(DMatrix* dmat, HostDeviceVector<float>* out_preds,
                    gbm::GBTreeModel const& model, DeviceModel const& d_model) const {
    CHECK(dmat->PageExists<SparsePage>()) << "Column split for external memory is not support.";
    PredictDMatrix(dmat, out_preds, d_model, model.learner_model_param->num_feature,
                   model.learner_model_param->num_output_group);
  }

 private:
  using BitType = BitVector::value_type;

  // Two-kernel pipeline per SparsePage batch: mask decisions locally,
  // all-reduce the bit vectors, then predict from the combined bits.
  void PredictDMatrix(DMatrix* dmat, HostDeviceVector<float>* out_preds, DeviceModel const& model,
                      bst_feature_t num_features, std::uint32_t num_group) const {
    dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
    dh::caching_device_vector<BitType> decision_storage{};
    dh::caching_device_vector<BitType> missing_storage{};

    auto constexpr kBlockThreads = 128;
    auto const max_shared_memory_bytes = dh::MaxSharedMemory(ctx_->gpu_id);
    auto const shared_memory_bytes =
        SharedMemoryBytes<kBlockThreads>(num_features, max_shared_memory_bytes);
    auto const use_shared = shared_memory_bytes != 0;

    auto const num_nodes = model.nodes.Size();
    std::size_t batch_offset = 0;
    for (auto const& batch : dmat->GetBatches<SparsePage>()) {
      auto const num_rows = batch.Size();
      // One decision bit and one missing bit per (row, node) pair.
      ResizeBitVectors(&decision_storage, &missing_storage, num_rows * num_nodes);
      BitVector decision_bits{dh::ToSpan(decision_storage)};
      BitVector missing_bits{dh::ToSpan(missing_storage)};

      batch.offset.SetDevice(ctx_->gpu_id);
      batch.data.SetDevice(ctx_->gpu_id);
      std::size_t entry_start = 0;
      SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(), num_features);

      auto const grid = static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
      dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes, ctx_->CUDACtx()->Stream()} (
          MaskBitVectorKernel, data, model.nodes.ConstDeviceSpan(),
          model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
          model.split_types.ConstDeviceSpan(), model.categories_tree_segments.ConstDeviceSpan(),
          model.categories_node_segments.ConstDeviceSpan(), model.categories.ConstDeviceSpan(),
          decision_bits, missing_bits, model.tree_beg_, model.tree_end_, num_features, num_rows,
          entry_start, num_nodes, use_shared, nan(""));

      AllReduceBitVectors(&decision_storage, &missing_storage);

      dh::LaunchKernel {grid, kBlockThreads, 0, ctx_->CUDACtx()->Stream()} (
          PredictByBitVectorKernel, model.nodes.ConstDeviceSpan(),
          out_preds->DeviceSpan().subspan(batch_offset), model.tree_segments.ConstDeviceSpan(),
          model.tree_group.ConstDeviceSpan(), model.split_types.ConstDeviceSpan(),
          model.categories_tree_segments.ConstDeviceSpan(),
          model.categories_node_segments.ConstDeviceSpan(), model.categories.ConstDeviceSpan(),
          decision_bits, missing_bits, model.tree_beg_, model.tree_end_, num_rows, num_nodes,
          num_group);

      batch_offset += batch.Size() * num_group;
    }
  }

  // OR for decisions (any worker saw a "go left"), AND for missing (missing on
  // every worker means truly missing).
  void AllReduceBitVectors(dh::caching_device_vector<BitType>* decision_storage,
                           dh::caching_device_vector<BitType>* missing_storage) const {
    collective::AllReduce<collective::Operation::kBitwiseOR>(
        ctx_->gpu_id, decision_storage->data().get(), decision_storage->size());
    collective::AllReduce<collective::Operation::kBitwiseAND>(
        ctx_->gpu_id, missing_storage->data().get(), missing_storage->size());
    collective::Synchronize(ctx_->gpu_id);
  }

  // Grow-only resize, then zero-fill both vectors before each batch.
  void ResizeBitVectors(dh::caching_device_vector<BitType>* decision_storage,
                        dh::caching_device_vector<BitType>* missing_storage,
                        std::size_t total_bits) const {
    auto const size = BitVector::ComputeStorageSize(total_bits);
    if (decision_storage->size() < size) {
      decision_storage->resize(size);
    }
    thrust::fill(ctx_->CUDACtx()->CTP(), decision_storage->begin(), decision_storage->end(), 0);
    if (missing_storage->size() < size) {
      missing_storage->resize(size);
    }
    thrust::fill(ctx_->CUDACtx()->CTP(), missing_storage->begin(), missing_storage->end(), 0);
  }

  Context const* ctx_;
};
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
// Launch PredictKernel over one SparsePage batch, writing into
// predictions[batch_offset...]. Shared-memory staging is used when the feature
// count fits; dense data selects the has_missing=false kernel specialization.
void PredictInternal(const SparsePage& batch,
                     DeviceModel const& model,
                     size_t num_features,
                     HostDeviceVector<bst_float>* predictions,
                     size_t batch_offset, bool is_dense) const {
  batch.offset.SetDevice(ctx_->gpu_id);
  batch.data.SetDevice(ctx_->gpu_id);
  const uint32_t BLOCK_THREADS = 128;
  size_t num_rows = batch.Size();
  auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
  auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
  size_t shared_memory_bytes =
      SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
  // Zero bytes means the per-block tile would exceed the device limit.
  bool use_shared = shared_memory_bytes != 0;
  size_t entry_start = 0;
  SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                      num_features);

  // Shared launcher so both specializations get identical arguments.
  auto const kernel = [&](auto predict_fn) {
    dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
        predict_fn, data, model.nodes.ConstDeviceSpan(),
        predictions->DeviceSpan().subspan(batch_offset),
        model.tree_segments.ConstDeviceSpan(),
        model.tree_group.ConstDeviceSpan(),
        model.split_types.ConstDeviceSpan(),
        model.categories_tree_segments.ConstDeviceSpan(),
        model.categories_node_segments.ConstDeviceSpan(),
        model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
        num_features, num_rows, entry_start, use_shared, model.num_group,
        nan(""));
  };
  if (is_dense) {
    // Dense input cannot have missing values; skip the NaN checks.
    kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
  } else {
    kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
  }
}
// Launch PredictKernel over one ellpack (quantized) batch, writing into
// out_preds[batch_offset...]. The ellpack loader never uses shared memory.
void PredictInternal(EllpackDeviceAccessor const& batch,
                     DeviceModel const& model,
                     HostDeviceVector<bst_float>* out_preds,
                     size_t batch_offset) const {
  const uint32_t BLOCK_THREADS = 256;
  size_t num_rows = batch.n_rows;
  auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
  // Fix: removed dead local `DeviceModel d_model;` — the kernel below reads
  // exclusively from the `model` parameter; the local was never used.
  bool use_shared = false;
  size_t entry_start = 0;
  dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
      PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
      model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
      model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
      model.split_types.ConstDeviceSpan(),
      model.categories_tree_segments.ConstDeviceSpan(),
      model.categories_node_segments.ConstDeviceSpan(),
      model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
      batch.NumFeatures(), num_rows, entry_start, use_shared,
      model.num_group, nan(""));
}
// Predict trees [tree_begin, tree_end) for every batch of `dmat`, accumulating
// into out_preds on device. Routes to the column-split helper, the SparsePage
// path, or the ellpack (external-memory) path as appropriate.
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
                           const gbm::GBTreeModel& model, size_t tree_begin,
                           size_t tree_end) const {
  if (tree_end - tree_begin == 0) {
    return;
  }
  out_preds->SetDevice(ctx_->gpu_id);
  // Fix: removed unused local `auto const& info = dmat->Info();` — every use
  // below calls dmat->Info() directly.
  DeviceModel d_model;
  d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);

  if (dmat->Info().IsColumnSplit()) {
    column_split_helper_.PredictBatch(dmat, out_preds, model, d_model);
    return;
  }

  if (dmat->PageExists<SparsePage>()) {
    size_t batch_offset = 0;
    for (auto &batch : dmat->GetBatches<SparsePage>()) {
      this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
                            out_preds, batch_offset, dmat->IsDense());
      batch_offset += batch.Size() * model.learner_model_param->num_output_group;
    }
  } else {
    // No in-memory sparse pages: iterate quantized ellpack pages instead.
    size_t batch_offset = 0;
    for (auto const& page : dmat->GetBatches<EllpackPage>(ctx_, BatchParam{})) {
      dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
      auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
      this->PredictInternal(
          page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
          d_model,
          out_preds,
          batch_offset);
      batch_offset += page.Impl()->n_rows;
    }
  }
}
public:
explicit GPUPredictor(Context const* ctx)
    : Predictor::Predictor{ctx}, column_split_helper_{ctx} {}

~GPUPredictor() override {
  // Only touch the device if the configured ordinal is still valid — the
  // destructor may run after device visibility has changed.
  if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
    dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
  }
}
// Predictor interface entry point: predict trees [tree_begin, tree_end) of
// `model` for `dmat`, accumulating into the cache entry. tree_end == 0 means
// "through the last tree". Requires a non-negative gpu_id.
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
                  const gbm::GBTreeModel& model, uint32_t tree_begin,
                  uint32_t tree_end = 0) const override {
  int device = ctx_->gpu_id;
  CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
  auto* out_preds = &predts->predictions;
  if (tree_end == 0) {
    // 0 is the sentinel for "predict with all trees".
    tree_end = model.trees.size();
  }
  this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
// In-place prediction over a device data adapter: validates the adapter's
// shape/device against the model, initializes the output vector, and launches
// PredictKernel with the adapter-specific Loader. `x` must hold a
// std::shared_ptr<Adapter>.
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(std::any const& x, std::shared_ptr<DMatrix> p_m,
                              const gbm::GBTreeModel& model, float missing,
                              PredictionCacheEntry* out_preds, uint32_t tree_begin,
                              uint32_t tree_end) const {
  uint32_t const output_groups = model.learner_model_param->num_output_group;
  auto m = std::any_cast<std::shared_ptr<Adapter>>(x);
  CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
      << "Number of columns in data must equal to trained model.";
  CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
      << "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
      << "but data is on: " << m->DeviceIdx();
  // Size the output either through the provided proxy DMatrix or a temporary
  // MetaInfo when no proxy is supplied.
  if (p_m) {
    p_m->Info().num_row_ = m->NumRows();
    this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
  } else {
    MetaInfo info;
    info.num_row_ = m->NumRows();
    this->InitOutPredictions(info, &(out_preds->predictions), model);
  }
  out_preds->predictions.SetDevice(m->DeviceIdx());

  const uint32_t BLOCK_THREADS = 128;
  auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
  auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
  size_t shared_memory_bytes =
      SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);

  DeviceModel d_model;
  d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());

  bool use_shared = shared_memory_bytes != 0;
  size_t entry_start = 0;

  dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
      PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
      d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
      d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
      d_model.split_types.ConstDeviceSpan(),
      d_model.categories_tree_segments.ConstDeviceSpan(),
      d_model.categories_node_segments.ConstDeviceSpan(),
      d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
      m->NumRows(), entry_start, use_shared, output_groups, missing);
}
// Dispatches in-place prediction by the runtime type of the proxy's adapter.
// Returns false when the adapter type is not a GPU adapter (caller falls back).
bool InplacePredict(std::shared_ptr<DMatrix> p_m, const gbm::GBTreeModel& model, float missing,
                    PredictionCacheEntry* out_preds, uint32_t tree_begin,
                    unsigned tree_end) const override {
  auto* proxy = dynamic_cast<data::DMatrixProxy*>(p_m.get());
  CHECK(proxy) << error::InplacePredictProxy();
  auto adapter = proxy->Adapter();
  if (adapter.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
    this->DispatchedInplacePredict<data::CupyAdapter,
                                   DeviceAdapterLoader<data::CupyAdapterBatch>>(
        adapter, p_m, model, missing, out_preds, tree_begin, tree_end);
    return true;
  }
  if (adapter.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
    this->DispatchedInplacePredict<data::CudfAdapter,
                                   DeviceAdapterLoader<data::CudfAdapterBatch>>(
        adapter, p_m, model, missing, out_preds, tree_begin, tree_end);
    return true;
  }
  return false;
}
// Computes per-feature SHAP contributions on GPU via GPUTreeShap.
// Output layout per row and output group: [feature_0 .. feature_{n-1}, bias].
// Approximate contributions and per-tree weights (dart) are unsupported.
void PredictContribution(DMatrix* p_fmat,
                         HostDeviceVector<bst_float>* out_contribs,
                         const gbm::GBTreeModel& model, unsigned tree_end,
                         std::vector<bst_float> const* tree_weights,
                         bool approximate, int,
                         unsigned) const override {
  std::string not_implemented{"contribution is not implemented in GPU "
                              "predictor, use `cpu_predictor` instead."};
  if (approximate) {
    LOG(FATAL) << "Approximated " << not_implemented;
  }
  if (tree_weights != nullptr) {
    LOG(FATAL) << "Dart booster feature " << not_implemented;
  }
  dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
  out_contribs->SetDevice(ctx_->gpu_id);
  // tree_end == 0 (or out of range) means "use all trees".
  if (tree_end == 0 || tree_end > model.trees.size()) {
    tree_end = static_cast<uint32_t>(model.trees.size());
  }
  const int ngroup = model.learner_model_param->num_output_group;
  CHECK_NE(ngroup, 0);
  // allocate space for (number of features + bias) times the number of rows
  size_t contributions_columns =
      model.learner_model_param->num_feature + 1;  // +1 for bias
  out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
                       model.learner_model_param->num_output_group);
  out_contribs->Fill(0.0f);
  auto phis = out_contribs->DeviceSpan();
  // Flatten the ensemble into root-to-leaf paths as required by GPUTreeShap.
  dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
      device_paths;
  DeviceModel d_model;
  d_model.Init(model, 0, tree_end, ctx_->gpu_id);
  dh::device_vector<uint32_t> categories;
  ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
  for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
    batch.data.SetDevice(ctx_->gpu_id);
    batch.offset.SetDevice(ctx_->gpu_id);
    SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                     model.learner_model_param->num_feature);
    // Each batch writes its own row range, offset by base_rowid.
    auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
    gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
        X, device_paths.begin(), device_paths.end(), ngroup, begin,
        dh::tend(phis));
  }
  // Add the base margin term to last column
  p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
  const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
  auto base_score = model.learner_model_param->BaseScore(ctx_);
  dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
              [=] __device__(size_t idx) {
                phis[(idx + 1) * contributions_columns - 1] +=
                    margin.empty() ? base_score(0) : margin[idx];
              });
}
// Computes pairwise SHAP interaction contributions on GPU via GPUTreeShap.
// Output is a (features+1) x (features+1) matrix per row per output group.
void PredictInteractionContributions(DMatrix* p_fmat,
                                     HostDeviceVector<bst_float>* out_contribs,
                                     const gbm::GBTreeModel& model,
                                     unsigned tree_end,
                                     std::vector<bst_float> const* tree_weights,
                                     bool approximate) const override {
  std::string not_implemented{"contribution is not implemented in GPU "
                              "predictor, use `cpu_predictor` instead."};
  if (approximate) {
    LOG(FATAL) << "Approximated " << not_implemented;
  }
  if (tree_weights != nullptr) {
    LOG(FATAL) << "Dart booster feature " << not_implemented;
  }
  dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
  out_contribs->SetDevice(ctx_->gpu_id);
  if (tree_end == 0 || tree_end > model.trees.size()) {
    tree_end = static_cast<uint32_t>(model.trees.size());
  }
  const int ngroup = model.learner_model_param->num_output_group;
  CHECK_NE(ngroup, 0);
  // allocate space for (number of features + bias) times the number of rows
  size_t contributions_columns =
      model.learner_model_param->num_feature + 1;  // +1 for bias
  // Note: interactions need columns^2 slots per row.
  out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
                       contributions_columns *
                       model.learner_model_param->num_output_group);
  out_contribs->Fill(0.0f);
  auto phis = out_contribs->DeviceSpan();
  dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
      device_paths;
  DeviceModel d_model;
  d_model.Init(model, 0, tree_end, ctx_->gpu_id);
  dh::device_vector<uint32_t> categories;
  ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
  for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
    batch.data.SetDevice(ctx_->gpu_id);
    batch.offset.SetDevice(ctx_->gpu_id);
    SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                     model.learner_model_param->num_feature);
    // NOTE(review): the per-batch offset is scaled by contributions_columns
    // once, while each row occupies contributions_columns^2 slots here —
    // confirm correctness for multi-batch (external memory) inputs.
    auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
    gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
        X, device_paths.begin(), device_paths.end(), ngroup, begin,
        dh::tend(phis));
  }
  // Add the base margin term to last column
  p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
  const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
  auto base_score = model.learner_model_param->BaseScore(ctx_);
  size_t n_features = model.learner_model_param->num_feature;
  dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
              [=] __device__(size_t idx) {
                size_t group = idx % ngroup;
                size_t row_idx = idx / ngroup;
                // (bias, bias) cell of the interaction matrix for this row/group.
                phis[gpu_treeshap::IndexPhiInteractions(row_idx, ngroup, group, n_features,
                                                        n_features, n_features)] +=
                    margin.empty() ? base_score(0) : margin[idx];
              });
}
// Single-instance prediction is not supported on GPU; callers must use
// cpu_predictor for this path.
void PredictInstance(const SparsePage::Inst&,
                     std::vector<bst_float>*,
                     const gbm::GBTreeModel&, unsigned, bool) const override {
  LOG(FATAL) << "[Internal error]: " << __func__
             << " is not implemented in GPU Predictor.";
}
// Computes the leaf index reached in every tree of [0, tree_end) for each row.
// Output layout: predictions[row * tree_end + tree] = leaf node id (as float),
// matching PredictLeafKernel's write pattern.
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
                 const gbm::GBTreeModel &model,
                 unsigned tree_end) const override {
  dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
  auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
  const MetaInfo& info = p_fmat->Info();
  constexpr uint32_t kBlockThreads = 128;
  // Rows are staged in shared memory when one row per thread fits; 0 disables it.
  size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
      info.num_col_, max_shared_memory_bytes);
  bool use_shared = shared_memory_bytes != 0;
  bst_feature_t num_features = info.num_col_;
  bst_row_t num_rows = info.num_row_;
  size_t entry_start = 0;
  if (tree_end == 0 || tree_end > model.trees.size()) {
    tree_end = static_cast<uint32_t>(model.trees.size());
  }
  predictions->SetDevice(ctx_->gpu_id);
  predictions->Resize(num_rows * tree_end);
  DeviceModel d_model;
  d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
  if (p_fmat->PageExists<SparsePage>()) {
    // Fix: the output offset must accumulate across batches (it was declared
    // inside the loop, resetting to zero for every page so multi-page inputs
    // overwrote the first rows), and it must advance by tree_end slots per row
    // because the kernel writes one value per tree per row.
    bst_row_t batch_offset = 0;
    for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
      batch.data.SetDevice(ctx_->gpu_id);
      batch.offset.SetDevice(ctx_->gpu_id);
      SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                          model.learner_model_param->num_feature};
      size_t batch_rows = batch.Size();
      auto grid =
          static_cast<uint32_t>(common::DivRoundUp(batch_rows, kBlockThreads));
      dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
          PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
          d_model.nodes.ConstDeviceSpan(),
          predictions->DeviceSpan().subspan(batch_offset),
          d_model.tree_segments.ConstDeviceSpan(),
          d_model.split_types.ConstDeviceSpan(),
          d_model.categories_tree_segments.ConstDeviceSpan(),
          d_model.categories_node_segments.ConstDeviceSpan(),
          d_model.categories.ConstDeviceSpan(),
          d_model.tree_beg_, d_model.tree_end_, num_features, batch_rows,
          entry_start, use_shared, nan(""));
      batch_offset += batch.Size() * tree_end;
    }
  } else {
    bst_row_t batch_offset = 0;  // same accumulation fix as above
    for (auto const& batch : p_fmat->GetBatches<EllpackPage>(ctx_, BatchParam{})) {
      EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
      size_t batch_rows = batch.Size();
      auto grid =
          static_cast<uint32_t>(common::DivRoundUp(batch_rows, kBlockThreads));
      dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
          PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
          d_model.nodes.ConstDeviceSpan(),
          predictions->DeviceSpan().subspan(batch_offset),
          d_model.tree_segments.ConstDeviceSpan(),
          d_model.split_types.ConstDeviceSpan(),
          d_model.categories_tree_segments.ConstDeviceSpan(),
          d_model.categories_node_segments.ConstDeviceSpan(),
          d_model.categories.ConstDeviceSpan(),
          d_model.tree_beg_, d_model.tree_end_, num_features, batch_rows,
          entry_start, use_shared, nan(""));
      batch_offset += batch.Size() * tree_end;
    }
  }
}
// Forwards configuration to the base Predictor; no GPU-specific parameters.
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
  Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
ColumnSplitHelper column_split_helper_;
};
// Register this implementation under the name used by the `predictor` parameter.
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
    .describe("Make predictions using GPU.")
    .set_body([](Context const* ctx) { return new GPUPredictor(ctx); });
} // namespace xgboost::predictor
| 2f6d76d313c0cef5f3811846b6efd51f7ede80db.cu | /**
* Copyright 2017-2023 by XGBoost Contributors
*/
#include <GPUTreeShap/gpu_treeshap.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <any> // for any, any_cast
#include <memory>
#include "../collective/communicator-inl.cuh"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/common.h"
#include "../common/cuda_context.cuh" // for CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/error_msg.h" // for InplacePredictProxy
#include "../data/device_adapter.cuh"
#include "../data/ellpack_page.cuh"
#include "../data/proxy_dmatrix.h"
#include "../gbm/gbtree_model.h"
#include "predict_fn.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
namespace xgboost::predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
// Device-side view of a single tree inside the flattened multi-tree arrays
// built by DeviceModel.
struct TreeView {
  RegTree::CategoricalSplitMatrix cats;      // categorical split metadata of this tree
  common::Span<RegTree::Node const> d_tree;  // this tree's nodes

  // Slices this tree's node and category ranges out of the concatenated model
  // arrays. `tree_idx` is absolute; the segment arrays start at `tree_begin`.
  XGBOOST_DEVICE
  TreeView(size_t tree_begin, size_t tree_idx, common::Span<const RegTree::Node> d_nodes,
           common::Span<size_t const> d_tree_segments,
           common::Span<FeatureType const> d_tree_split_types,
           common::Span<uint32_t const> d_cat_tree_segments,
           common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
           common::Span<uint32_t const> d_categories) {
    auto begin = d_tree_segments[tree_idx - tree_begin];
    auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
                   d_tree_segments[tree_idx - tree_begin];
    d_tree = d_nodes.subspan(begin, n_nodes);
    // Per-node metadata is indexed by the same node offsets as d_nodes.
    auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
    auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
    auto tree_categories =
        d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
                             d_cat_tree_segments[tree_idx - tree_begin + 1] -
                                 d_cat_tree_segments[tree_idx - tree_begin]);
    cats.split_type = tree_split_types;
    cats.categories = tree_categories;
    cats.node_ptr = tree_cat_ptrs;
  }

  // True when this tree contains at least one categorical split.
  __device__ bool HasCategoricalSplit() const {
    return !cats.categories.empty();
  }
};
// Device-side view over one CSR sparse page.
struct SparsePageView {
  common::Span<const Entry> d_data;         // concatenated (feature, value) entries
  common::Span<const bst_row_t> d_row_ptr;  // CSR row offsets, size NumRows() + 1
  bst_feature_t num_features;

  SparsePageView() = default;
  XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
                                common::Span<const bst_row_t> row_ptr,
                                bst_feature_t num_features)
      : d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}

  // Returns the value of feature `fidx` in row `ridx`, or NaN when missing.
  __device__ float GetElement(size_t ridx, size_t fidx) const {
    // Binary search over the row's entries (sorted by feature index).
    auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
    auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
    if (end_ptr - begin_ptr == this->NumCols()) {
      // Bypass span check for dense data
      return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
    }
    // Fix: `previous_middle` was read (compared) uninitialized on the first
    // iteration, which is undefined behaviour. Initialize it to `end_ptr`,
    // which can never equal `middle` inside the loop.
    common::Span<const Entry>::iterator previous_middle = end_ptr;
    while (end_ptr != begin_ptr) {
      auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
      if (middle == previous_middle) {
        break;  // search stalled: entry not present
      } else {
        previous_middle = middle;
      }
      if (middle->index == fidx) {
        return middle->fvalue;
      } else if (middle->index < fidx) {
        begin_ptr = middle;
      } else {
        end_ptr = middle;
      }
    }
    // Value is missing
    return nanf("");
  }
  XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
  XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
// Feature loader over a SparsePageView that optionally stages each thread's
// row into dynamic shared memory (one dense row of num_features floats per
// thread; missing slots hold NaN).
struct SparsePageLoader {
  bool use_shared;      // whether rows were staged into shared memory
  SparsePageView data;
  float* smem;          // base of the dynamic shared-memory staging area
  __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
                              bst_row_t num_rows, size_t entry_start, float)
      : use_shared(use_shared),
        data(data) {
    extern __shared__ float _smem[];
    smem = _smem;
    // Copy instances
    if (use_shared) {
      bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      int shared_elements = blockDim.x * data.num_features;
      // Fill the whole tile with NaN (missing) before scattering values.
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        bst_uint elem_begin = data.d_row_ptr[global_idx];
        bst_uint elem_end = data.d_row_ptr[global_idx + 1];
        for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
          // `entry_start` rebases row pointers onto this batch's entry span —
          // presumably 0 for in-memory pages; verify for sliced batches.
          Entry elem = data.d_data[elem_idx - entry_start];
          smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
        }
      }
      __syncthreads();
    }
  }
  __device__ float GetElement(size_t ridx, size_t fidx) const {
    if (use_shared) {
      // The staged copy is addressed by threadIdx.x; `ridx` is implicit.
      return smem[threadIdx.x * data.num_features + fidx];
    } else {
      return data.GetElement(ridx, fidx);
    }
  }
};
// Feature loader over an ELLPACK (histogram-binned) page; reconstructs a
// representative float value from the bin index.
struct EllpackLoader {
  EllpackDeviceAccessor const& matrix;
  XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool, bst_feature_t, bst_row_t,
                               size_t, float)
      : matrix{m} {}
  // Returns a value for (ridx, fidx), or NaN when the bin says "missing".
  __device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
    auto gidx = matrix.GetBinIndex(ridx, fidx);
    if (gidx == -1) {
      return nan("");
    }
    if (common::IsCat(matrix.feature_types, fidx)) {
      // Categorical bins store the category value itself.
      return matrix.gidx_fvalue_map[gidx];
    }
    // The gradient index needs to be shifted by one as min values are not included in the
    // cuts.
    if (gidx == matrix.feature_segments[fidx]) {
      return matrix.min_fvalue[fidx];
    }
    return matrix.gidx_fvalue_map[gidx - 1];
  }
};
// Feature loader over a device adapter batch (e.g. CuPy/cuDF), optionally
// staging each thread's row into dynamic shared memory. Values failing the
// `missing` check are treated as NaN.
template <typename Batch>
struct DeviceAdapterLoader {
  Batch batch;
  bst_feature_t columns;
  float* smem;                    // dynamic shared-memory staging area
  bool use_shared;
  data::IsValidFunctor is_valid;  // filters out values equal to `missing`
  using BatchT = Batch;
  XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
                                         bst_feature_t num_features, bst_row_t num_rows,
                                         size_t entry_start, float missing) :
    batch{batch},
    columns{num_features},
    use_shared{use_shared},
    is_valid{missing} {
    extern __shared__ float _smem[];
    smem = _smem;
    if (use_shared) {
      uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      size_t shared_elements = blockDim.x * num_features;
      // Tile defaults to NaN (missing); valid values overwrite below.
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        auto beg = global_idx * columns;
        auto end = (global_idx + 1) * columns;
        for (size_t i = beg; i < end; ++i) {
          auto value = batch.GetElement(i).value;
          if (is_valid(value)) {
            smem[threadIdx.x * num_features + (i - beg)] = value;
          }
        }
      }
    }
    __syncthreads();
  }
  XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
    if (use_shared) {
      return smem[threadIdx.x * columns + fidx];
    }
    // Unstaged path: read from the adapter directly, mapping missing to NaN.
    auto value = batch.GetElement(ridx * columns + fidx).value;
    if (is_valid(value)) {
      return value;
    } else {
      return nan("");
    }
  }
};
// Walks one tree from the root and returns the id of the leaf node reached by
// row `ridx`, reading feature values through `loader`.
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
                                   Loader *loader) {
  bst_node_t nidx = 0;
  for (RegTree::Node node = tree.d_tree[nidx]; !node.IsLeaf();
       node = tree.d_tree[nidx]) {
    float const fvalue = loader->GetElement(ridx, node.SplitIndex());
    nidx = GetNextNode<has_missing, has_categorical>(
        node, nidx, fvalue, common::CheckNAN(fvalue), tree.cats);
  }
  return nidx;
}
// Returns the leaf value of the leaf reached by row `ridx` in `tree`.
// The categorical/numeric dispatch keeps the traversal loop itself branch-free.
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
                               Loader *loader) {
  bst_node_t const nidx =
      tree.HasCategoricalSplit()
          ? GetLeafIndex<has_missing, true>(ridx, tree, loader)
          : GetLeafIndex<has_missing, false>(ridx, tree, loader);
  return tree.d_tree[nidx].LeafValue();
}
// One thread per row: records the leaf id reached in each tree of
// [tree_begin, tree_end) into d_out_predictions[row * n_trees + tree_idx].
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
                  common::Span<float> d_out_predictions,
                  common::Span<size_t const> d_tree_segments,
                  common::Span<FeatureType const> d_tree_split_types,
                  common::Span<uint32_t const> d_cat_tree_segments,
                  common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
                  common::Span<uint32_t const> d_categories,
                  size_t tree_begin, size_t tree_end, size_t num_features,
                  size_t num_rows, size_t entry_start, bool use_shared,
                  float missing) {
  bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
  // Fix: construct the loader BEFORE the bounds check. The loader's
  // constructor executes block-wide __syncthreads() when shared-memory staging
  // is enabled, so out-of-range threads must not exit before it runs (this
  // also matches PredictKernel's ordering). The loader guards out-of-range
  // rows internally.
  Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
  if (ridx >= num_rows) {
    return;
  }
  for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
    TreeView d_tree{
        tree_begin, tree_idx, d_nodes,
        d_tree_segments, d_tree_split_types, d_cat_tree_segments,
        d_cat_node_segments, d_categories};
    bst_node_t leaf = -1;
    if (d_tree.HasCategoricalSplit()) {
      leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
    } else {
      leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
    }
    // NOTE(review): the column index uses `tree_idx` rather than
    // `tree_idx - tree_begin`; correct only for tree_begin == 0, which is what
    // callers currently pass — confirm before reusing with an offset.
    d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
  }
}
// One thread per row: accumulates margin predictions for trees
// [tree_begin, tree_end). Single-group models add into d_out_predictions[row];
// multi-group models add into d_out_predictions[row * num_group + group].
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
              common::Span<float> d_out_predictions,
              common::Span<size_t const> d_tree_segments,
              common::Span<int const> d_tree_group,
              common::Span<FeatureType const> d_tree_split_types,
              common::Span<uint32_t const> d_cat_tree_segments,
              common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
              common::Span<uint32_t const> d_categories, size_t tree_begin,
              size_t tree_end, size_t num_features, size_t num_rows,
              size_t entry_start, bool use_shared, int num_group, float missing) {
  bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
  // The loader is constructed before the bounds check on purpose: its
  // constructor contains block-wide __syncthreads() when staging rows in
  // shared memory, so every thread of the block must reach it.
  Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
  if (global_idx >= num_rows) return;
  if (num_group == 1) {
    // Single output group: sum all tree leaves into one slot per row.
    float sum = 0;
    for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      TreeView d_tree{
          tree_begin, tree_idx, d_nodes,
          d_tree_segments, d_tree_split_types, d_cat_tree_segments,
          d_cat_node_segments, d_categories};
      float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
      sum += leaf;
    }
    d_out_predictions[global_idx] += sum;
  } else {
    // Multi-class: each tree contributes to its own output group.
    for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      int tree_group = d_tree_group[tree_idx];
      TreeView d_tree{
          tree_begin, tree_idx, d_nodes,
          d_tree_segments, d_tree_split_types, d_cat_tree_segments,
          d_cat_node_segments, d_categories};
      bst_uint out_prediction_idx = global_idx * num_group + tree_group;
      d_out_predictions[out_prediction_idx] +=
          GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
    }
  }
}
// Mirror of a GBTree model flattened into device-resident arrays, so kernels
// can traverse all trees with span lookups.
class DeviceModel {
 public:
  // Need to lazily construct the vectors because GPU id is only known at runtime
  HostDeviceVector<RTreeNodeStat> stats;      // per-node statistics (e.g. sum_hess)
  HostDeviceVector<size_t> tree_segments;     // prefix offsets of each tree's nodes
  HostDeviceVector<RegTree::Node> nodes;      // all trees' nodes, concatenated
  HostDeviceVector<int> tree_group;           // output group of each tree
  HostDeviceVector<FeatureType> split_types;  // per-node split type
  // Pointer to each tree, segmenting the node array.
  HostDeviceVector<uint32_t> categories_tree_segments;
  // Pointer to each node, segmenting categories array.
  HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment> categories_node_segments;
  HostDeviceVector<uint32_t> categories;      // category bitfield storage
  size_t tree_beg_;  // NOLINT
  size_t tree_end_;  // NOLINT
  int num_group;

  // Copies trees [tree_begin, tree_end) of `model` onto device `gpu_id`.
  void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
    dh::safe_cuda(cudaSetDevice(gpu_id));
    // Copy decision trees to device
    tree_segments = HostDeviceVector<size_t>({}, gpu_id);
    auto& h_tree_segments = tree_segments.HostVector();
    h_tree_segments.reserve((tree_end - tree_begin) + 1);
    size_t sum = 0;
    h_tree_segments.push_back(sum);
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      sum += model.trees.at(tree_idx)->GetNodes().size();
      h_tree_segments.push_back(sum);
    }
    nodes = HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id);
    stats = HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(), RTreeNodeStat(), gpu_id);
    auto d_nodes = nodes.DevicePointer();
    auto d_stats = stats.DevicePointer();
    // Async copies on the legacy default stream; source vectors are owned by
    // the model and outlive the copies, and later default-stream work is
    // ordered after them.
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
      auto& src_stats = model.trees.at(tree_idx)->GetStats();
      dh::safe_cuda(cudaMemcpyAsync(
          d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
          sizeof(RegTree::Node) * src_nodes.size(), cudaMemcpyDefault));
      dh::safe_cuda(cudaMemcpyAsync(
          d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
          sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
    }
    tree_group = HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id);
    auto& h_tree_group = tree_group.HostVector();
    std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
    // Initialize categorical splits.
    split_types.SetDevice(gpu_id);
    std::vector<FeatureType>& h_split_types = split_types.HostVector();
    h_split_types.resize(h_tree_segments.back());
    for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
      auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
      std::copy(src_st.cbegin(), src_st.cend(),
                h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
    }
    // Concatenate each tree's category storage, recording per-tree offsets.
    categories = HostDeviceVector<uint32_t>({}, gpu_id);
    categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
    std::vector<uint32_t> &h_categories = categories.HostVector();
    std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
    for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
      auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
      size_t orig_size = h_categories.size();
      h_categories.resize(orig_size + src_cats.size());
      std::copy(src_cats.cbegin(), src_cats.cend(),
                h_categories.begin() + orig_size);
      h_split_cat_segments.push_back(h_categories.size());
    }
    // Per-node (begin, size) pointers into the concatenated category array.
    categories_node_segments = HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>(
        h_tree_segments.back(), {}, gpu_id);
    std::vector<RegTree::CategoricalSplitMatrix::Segment>& h_categories_node_segments =
        categories_node_segments.HostVector();
    for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
      auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
      std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
                h_categories_node_segments.begin() +
                    h_tree_segments[tree_idx - tree_begin]);
    }
    this->tree_beg_ = tree_begin;
    this->tree_end_ = tree_end;
    this->num_group = model.learner_model_param->OutputLength();
  }
};
// Split condition for one GPUTreeShap path element: either a numeric interval
// [lower, upper) or a category bitset, plus the missing-value branch flag.
struct ShapSplitCondition {
  ShapSplitCondition() = default;
  XGBOOST_DEVICE
  ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
                     bool is_missing_branch, common::CatBitField cats)
      : feature_lower_bound(feature_lower_bound),
        feature_upper_bound(feature_upper_bound),
        is_missing_branch(is_missing_branch), categories{std::move(cats)} {
    assert(feature_lower_bound <= feature_upper_bound);
  }

  /*! Feature values >= lower and < upper flow down this path. */
  float feature_lower_bound;
  float feature_upper_bound;
  /*! Feature value set to true flow down this path. */
  common::CatBitField categories;
  /*! Do missing values flow down this path? */
  bool is_missing_branch;

  // Does this instance flow down this path?
  [[nodiscard]] XGBOOST_DEVICE bool EvaluateSplit(float x) const {
    // is nan
    if (isnan(x)) {
      return is_missing_branch;
    }
    if (categories.Capacity() != 0) {
      // Non-empty bitset marks a categorical split; x encodes the category.
      auto cat = static_cast<uint32_t>(x);
      return categories.Check(cat);
    } else {
      return x >= feature_lower_bound && x < feature_upper_bound;
    }
  }

  // the &= op in bitfiled is per cuda thread, this one loops over the entire
  // bitfield.
  XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
                                                      common::CatBitField r) {
    if (l.Data() == r.Data()) {
      return l;
    }
    if (l.Capacity() > r.Capacity()) {
      thrust::swap(l, r);
    }
    // Fix: after the swap `l` is the smaller bitfield, so the loop must be
    // bounded by `l`'s word count; iterating over `r.Bits().size()` wrote past
    // the end of `l`'s storage whenever the capacities differed. Words of `r`
    // beyond `l`'s capacity are implicitly absent from the intersection.
    for (size_t i = 0; i < l.Bits().size(); ++i) {
      l.Bits()[i] &= r.Bits()[i];
    }
    return l;
  }

  // Combine two split conditions on the same feature
  XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
    // Combine duplicate features
    if (categories.Capacity() != 0 || other.categories.Capacity() != 0) {
      categories = Intersect(categories, other.categories);
    } else {
      // Numeric conditions intersect to the tighter interval.
      feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
      feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
    }
    is_missing_branch = is_missing_branch && other.is_missing_branch;
  }
};
// Per-leaf metadata gathered before building GPUTreeShap path elements.
struct PathInfo {
  int64_t leaf_position;  // -1 not a leaf
  size_t length;          // number of nodes on the root-to-leaf path
  size_t tree_idx;        // which tree the leaf belongs to
};
// Transform model into path element form for GPUTreeShap
// For every leaf of every tree, emits the root-to-leaf sequence of
// ShapSplitCondition elements into `paths`; categorical splits additionally
// allocate bitset storage in `path_categories`.
void ExtractPaths(
    dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
    DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
    int gpu_id) {
  dh::safe_cuda(cudaSetDevice(gpu_id));
  auto& device_model = *model;

  dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
  dh::XGBCachingDeviceAllocator<PathInfo> alloc;
  auto d_nodes = device_model.nodes.ConstDeviceSpan();
  auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
  // For each node index, compute (leaf position, path length, tree id);
  // non-leaf and deleted nodes are marked with leaf_position == -1.
  auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
      thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
        auto n = d_nodes[idx];
        if (!n.IsLeaf() || n.IsDeleted()) {
          return PathInfo{-1, 0, 0};
        }
        size_t tree_idx =
            dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
        size_t tree_offset = d_tree_segments[tree_idx];
        size_t path_length = 1;
        // Walk up to the root to measure the path length.
        while (!n.IsRoot()) {
          n = d_nodes[n.Parent() + tree_offset];
          path_length++;
        }
        return PathInfo{static_cast<int64_t>(idx), path_length, tree_idx};
      });
  auto end = thrust::copy_if(
      thrust::cuda::par(alloc), nodes_transform,
      nodes_transform + d_nodes.size(), info.begin(),
      [=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
  info.resize(end - info.begin());
  auto length_iterator = dh::MakeTransformIterator<size_t>(
      info.begin(),
      [=] __device__(const PathInfo& info) { return info.length; });
  // path_segments[i] = start offset of leaf i's path; back() = total elements.
  // NOTE(review): the scan consumes info.size() + 1 inputs, i.e. reads one
  // element past the end of `info` via the transform iterator — confirm this
  // is benign for the allocator in use.
  dh::caching_device_vector<size_t> path_segments(info.size() + 1);
  thrust::exclusive_scan(thrust::cuda::par(alloc), length_iterator,
                         length_iterator + info.size() + 1,
                         path_segments.begin());

  paths->resize(path_segments.back());

  auto d_paths = dh::ToSpan(*paths);
  auto d_info = info.data().get();
  auto d_stats = device_model.stats.ConstDeviceSpan();
  auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
  auto d_path_segments = path_segments.data().get();

  auto d_split_types = device_model.split_types.ConstDeviceSpan();
  auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
  auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();

  // When the model has categorical splits, reserve `max_cat` words per path
  // element so each element can carry its own category bitset.
  size_t max_cat = 0;
  if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
                     common::IsCatOp{})) {
    dh::PinnedMemory pinned;
    auto h_max_cat = pinned.GetSpan<RegTree::CategoricalSplitMatrix::Segment>(1);
    auto max_elem_it = dh::MakeTransformIterator<size_t>(
        dh::tbegin(d_cat_node_segments),
        [] __device__(RegTree::CategoricalSplitMatrix::Segment seg) { return seg.size; });
    size_t max_cat_it =
        thrust::max_element(thrust::device, max_elem_it,
                            max_elem_it + d_cat_node_segments.size()) -
        max_elem_it;
    dh::safe_cuda(cudaMemcpy(h_max_cat.data(),
                             d_cat_node_segments.data() + max_cat_it,
                             h_max_cat.size_bytes(), cudaMemcpyDeviceToHost));
    max_cat = h_max_cat[0].size;
    CHECK_GE(max_cat, 1);
    path_categories->resize(max_cat * paths->size());
  }

  auto d_model_categories = device_model.categories.DeviceSpan();
  common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);

  // One thread per leaf: walk from the leaf up to the root, emitting path
  // elements back-to-front into this leaf's segment of d_paths.
  dh::LaunchN(info.size(), [=] __device__(size_t idx) {
    auto path_info = d_info[idx];
    size_t tree_offset = d_tree_segments[path_info.tree_idx];
    TreeView tree{0,                   path_info.tree_idx, d_nodes,
                  d_tree_segments,     d_split_types,      d_cat_segments,
                  d_cat_node_segments, d_model_categories};
    int group = d_tree_group[path_info.tree_idx];
    size_t child_idx = path_info.leaf_position;
    auto child = d_nodes[child_idx];
    float v = child.LeafValue();
    const float inf = std::numeric_limits<float>::infinity();
    size_t output_position = d_path_segments[idx + 1] - 1;
    while (!child.IsRoot()) {
      size_t parent_idx = tree_offset + child.Parent();
      // zero_fraction = fraction of training weight flowing into this child.
      double child_cover = d_stats[child_idx].sum_hess;
      double parent_cover = d_stats[parent_idx].sum_hess;
      double zero_fraction = child_cover / parent_cover;
      auto parent = tree.d_tree[child.Parent()];

      bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
      bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
                             (parent.DefaultLeft() && is_left_path);

      float lower_bound = -inf;
      float upper_bound = inf;
      common::CatBitField bits;
      if (common::IsCat(tree.cats.split_type, child.Parent())) {
        // Copy the node's category set (complemented for the left branch) into
        // this path element's private bitset storage.
        auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
        size_t size = tree.cats.node_ptr[child.Parent()].size;
        auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
        SPAN_CHECK(path_cats.size() >= node_cats.size());
        for (size_t i = 0; i < node_cats.size(); ++i) {
          path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
        }
        bits = common::CatBitField{path_cats};
      } else {
        lower_bound = is_left_path ? -inf : parent.SplitCond();
        upper_bound = is_left_path ? parent.SplitCond() : inf;
      }
      d_paths[output_position--] =
          gpu_treeshap::PathElement<ShapSplitCondition>{
              idx,           parent.SplitIndex(),
              group,         ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
              zero_fraction, v};
      child_idx = parent_idx;
      child = parent;
    }
    // Root node has feature -1
    d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
  });
}
namespace {
// Returns the dynamic shared-memory size needed to stage one dense row of
// `cols` floats per thread for a block of kBlockThreads, or 0 when that does
// not fit in `max_shared_memory_bytes` (callers treat 0 as "no staging").
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // A device reporting zero shared memory would make staging impossible.
  CHECK_GT(max_shared_memory_bytes, 0);
  size_t const required = sizeof(float) * cols * kBlockThreads;
  return required > max_shared_memory_bytes ? size_t{0} : required;
}
using BitVector = LBitField64;
// One thread per row.  For every non-deleted, non-leaf node of every tree in
// [tree_begin, tree_end), evaluate the row's feature against the node's split
// and record the verdict into two bit vectors:
//   decision_bits -- set when the split condition sends the row to the left
//                    child (see GetLeafWeightByBitVector, which reads the bit
//                    as "go left");
//   missing_bits  -- set when the feature value is missing (NaN).
// Bit layout is row-major: row_idx * num_nodes + tree_offset + nid.
// Used by ColumnSplitHelper: each worker only holds a subset of the features,
// and the per-node verdicts are later merged across workers via allreduce.
__global__ void MaskBitVectorKernel(
SparsePageView data, common::Span<RegTree::Node const> d_nodes,
common::Span<std::size_t const> d_tree_segments, common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<std::uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<std::uint32_t const> d_categories, BitVector decision_bits, BitVector missing_bits,
std::size_t tree_begin, std::size_t tree_end, std::size_t num_features, std::size_t num_rows,
std::size_t entry_start, std::size_t num_nodes, bool use_shared, float missing) {
auto const row_idx = blockIdx.x * blockDim.x + threadIdx.x;
// Tail guard: the grid may be larger than num_rows.
if (row_idx >= num_rows) {
return;
}
SparsePageLoader loader(data, use_shared, num_features, num_rows, entry_start, missing);
// Running offset of the current tree's first node within the flat bit index.
std::size_t tree_offset = 0;
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
auto const tree_nodes = d_tree.d_tree.size();
for (auto nid = 0; nid < tree_nodes; nid++) {
auto const& node = d_tree.d_tree[nid];
// Only split nodes carry a decision; leaves and deleted nodes are skipped
// (their bits simply stay 0).
if (node.IsDeleted() || node.IsLeaf()) {
continue;
}
auto const fvalue = loader.GetElement(row_idx, node.SplitIndex());
auto const is_missing = common::CheckNAN(fvalue);
auto const bit_index = row_idx * num_nodes + tree_offset + nid;
if (is_missing) {
missing_bits.Set(bit_index);
} else {
// Dispatch on whether the tree contains categorical splits at all so the
// common numerical-only case avoids the categorical code path.
auto const decision = d_tree.HasCategoricalSplit()
? GetDecision<true>(node, nid, fvalue, d_tree.cats)
: GetDecision<false>(node, nid, fvalue, d_tree.cats);
if (decision) {
decision_bits.Set(bit_index);
}
}
}
tree_offset += tree_nodes;
}
}
// Walk one tree from the root for row `ridx`, steering at every split with the
// precomputed (allreduced) bit vectors instead of raw feature values:
//   missing bit set  -> follow the node's default child;
//   decision bit set -> go left; otherwise go right (left child + 1).
// Bits are indexed row-major: ridx * num_nodes + tree_offset + node id.
// Returns the leaf value the row lands on.
__device__ float GetLeafWeightByBitVector(bst_row_t ridx, TreeView const& tree,
                                          BitVector const& decision_bits,
                                          BitVector const& missing_bits, std::size_t num_nodes,
                                          std::size_t tree_offset) {
  bst_node_t cur = 0;
  for (RegTree::Node node = tree.d_tree[cur]; !node.IsLeaf();
       node = tree.d_tree[cur]) {
    std::size_t const bit = ridx * num_nodes + tree_offset + cur;
    if (missing_bits.Check(bit)) {
      cur = node.DefaultChild();
    } else if (decision_bits.Check(bit)) {
      cur = node.LeftChild();
    } else {
      cur = node.LeftChild() + 1;
    }
  }
  return tree.d_tree[cur].LeafValue();
}
// Final traversal for column-split prediction: one thread per row walks every
// tree in [tree_begin, tree_end) using the merged decision/missing bit
// vectors (no feature values are read here).  With a single output group the
// leaf weights are accumulated into a register and written once; with
// multiple groups each tree adds into its group's slot for the row.
__global__ void PredictByBitVectorKernel(
common::Span<RegTree::Node const> d_nodes, common::Span<float> d_out_predictions,
common::Span<std::size_t const> d_tree_segments, common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<std::uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<std::uint32_t const> d_categories, BitVector decision_bits, BitVector missing_bits,
std::size_t tree_begin, std::size_t tree_end, std::size_t num_rows, std::size_t num_nodes,
std::uint32_t num_group) {
auto const row_idx = blockIdx.x * blockDim.x + threadIdx.x;
// Tail guard: the grid may be larger than num_rows.
if (row_idx >= num_rows) {
return;
}
// Offset of the current tree's first node in the flat bit index; must advance
// in the same order MaskBitVectorKernel used.
std::size_t tree_offset = 0;
if (num_group == 1) {
// Single-group fast path: one accumulated write per row.
float sum = 0;
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
sum += GetLeafWeightByBitVector(row_idx, d_tree, decision_bits, missing_bits, num_nodes,
tree_offset);
tree_offset += d_tree.d_tree.size();
}
d_out_predictions[row_idx] += sum;
} else {
// Multi-class: each tree belongs to one output group; outputs are laid out
// row-major as row_idx * num_group + group.
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto const tree_group = d_tree_group[tree_idx];
TreeView d_tree{tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = row_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] += GetLeafWeightByBitVector(
row_idx, d_tree, decision_bits, missing_bits, num_nodes, tree_offset);
tree_offset += d_tree.d_tree.size();
}
}
}
// Prediction path for column-wise (feature-split) distributed data.  Each
// worker holds only a subset of the features, so no single worker can walk a
// tree on its own.  Instead:
//   1. MaskBitVectorKernel records, per (row, node), whether the split went
//      left (decision bit) and whether the value was missing (missing bit),
//      based on the locally available features;
//   2. the bit vectors are merged across workers with a bitwise allreduce;
//   3. PredictByBitVectorKernel traverses the trees using the merged bits.
class ColumnSplitHelper {
public:
explicit ColumnSplitHelper(Context const* ctx) : ctx_{ctx} {}
// Predict a whole DMatrix.  Only in-memory SparsePage input is supported.
void PredictBatch(DMatrix* dmat, HostDeviceVector<float>* out_preds,
gbm::GBTreeModel const& model, DeviceModel const& d_model) const {
CHECK(dmat->PageExists<SparsePage>()) << "Column split for external memory is not support.";
PredictDMatrix(dmat, out_preds, d_model, model.learner_model_param->num_feature,
model.learner_model_param->num_output_group);
}
private:
// Storage word type backing the bit vectors.
using BitType = BitVector::value_type;
// Mask -> allreduce -> predict, batch by batch.
void PredictDMatrix(DMatrix* dmat, HostDeviceVector<float>* out_preds, DeviceModel const& model,
bst_feature_t num_features, std::uint32_t num_group) const {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
// Reused (and re-zeroed) across batches; sized to num_rows * num_nodes bits.
dh::caching_device_vector<BitType> decision_storage{};
dh::caching_device_vector<BitType> missing_storage{};
auto constexpr kBlockThreads = 128;
auto const max_shared_memory_bytes = dh::MaxSharedMemory(ctx_->gpu_id);
auto const shared_memory_bytes =
SharedMemoryBytes<kBlockThreads>(num_features, max_shared_memory_bytes);
auto const use_shared = shared_memory_bytes != 0;
auto const num_nodes = model.nodes.Size();
std::size_t batch_offset = 0;
for (auto const& batch : dmat->GetBatches<SparsePage>()) {
auto const num_rows = batch.Size();
ResizeBitVectors(&decision_storage, &missing_storage, num_rows * num_nodes);
BitVector decision_bits{dh::ToSpan(decision_storage)};
BitVector missing_bits{dh::ToSpan(missing_storage)};
batch.offset.SetDevice(ctx_->gpu_id);
batch.data.SetDevice(ctx_->gpu_id);
std::size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(), num_features);
auto const grid = static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
// Step 1: evaluate all locally-known splits into the bit vectors.
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes, ctx_->CUDACtx()->Stream()} (
MaskBitVectorKernel, data, model.nodes.ConstDeviceSpan(),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(), model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(), model.categories.ConstDeviceSpan(),
decision_bits, missing_bits, model.tree_beg_, model.tree_end_, num_features, num_rows,
entry_start, num_nodes, use_shared, nan(""));
// Step 2: merge verdicts from all workers.
AllReduceBitVectors(&decision_storage, &missing_storage);
// Step 3: walk the trees with the merged bits.
dh::LaunchKernel {grid, kBlockThreads, 0, ctx_->CUDACtx()->Stream()} (
PredictByBitVectorKernel, model.nodes.ConstDeviceSpan(),
out_preds->DeviceSpan().subspan(batch_offset), model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(), model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(), model.categories.ConstDeviceSpan(),
decision_bits, missing_bits, model.tree_beg_, model.tree_end_, num_rows, num_nodes,
num_group);
batch_offset += batch.Size() * num_group;
}
}
// Merge per-worker bits.  Decision bits are OR-ed, missing bits are AND-ed --
// NOTE(review): presumably only the feature-owning worker can set a decision
// bit, and a value is missing only if every worker saw it as missing; confirm
// against the CPU column-split implementation.
void AllReduceBitVectors(dh::caching_device_vector<BitType>* decision_storage,
dh::caching_device_vector<BitType>* missing_storage) const {
collective::AllReduce<collective::Operation::kBitwiseOR>(
ctx_->gpu_id, decision_storage->data().get(), decision_storage->size());
collective::AllReduce<collective::Operation::kBitwiseAND>(
ctx_->gpu_id, missing_storage->data().get(), missing_storage->size());
collective::Synchronize(ctx_->gpu_id);
}
// Grow (never shrink) the backing storage, then zero-fill all of it so stale
// bits from a previous batch cannot leak into the next masking pass.
void ResizeBitVectors(dh::caching_device_vector<BitType>* decision_storage,
dh::caching_device_vector<BitType>* missing_storage,
std::size_t total_bits) const {
auto const size = BitVector::ComputeStorageSize(total_bits);
if (decision_storage->size() < size) {
decision_storage->resize(size);
}
thrust::fill(ctx_->CUDACtx()->CTP(), decision_storage->begin(), decision_storage->end(), 0);
if (missing_storage->size() < size) {
missing_storage->resize(size);
}
thrust::fill(ctx_->CUDACtx()->CTP(), missing_storage->begin(), missing_storage->end(), 0);
}
Context const* ctx_;
};
} // anonymous namespace
// GPU implementation of the Predictor interface.  Dispatches prediction over
// in-memory SparsePage batches or (external-memory) EllpackPage batches,
// supports in-place prediction on device adapters (CuPy / cuDF), and
// implements (interaction) SHAP contributions via GPUTreeShap.
class GPUPredictor : public xgboost::Predictor {
 private:
  // Predict one in-memory sparse batch, writing into
  // predictions[batch_offset...].  `is_dense` selects a kernel specialization
  // that skips missing-value handling.
  void PredictInternal(const SparsePage& batch,
                       DeviceModel const& model,
                       size_t num_features,
                       HostDeviceVector<bst_float>* predictions,
                       size_t batch_offset, bool is_dense) const {
    batch.offset.SetDevice(ctx_->gpu_id);
    batch.data.SetDevice(ctx_->gpu_id);
    const uint32_t BLOCK_THREADS = 128;
    size_t num_rows = batch.Size();
    auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
    auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
    // 0 bytes means "rows do not fit in shared memory; read from global".
    size_t shared_memory_bytes =
        SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
    bool use_shared = shared_memory_bytes != 0;
    size_t entry_start = 0;
    SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                        num_features);
    // Shared launch helper so the dense/sparse kernel variants use identical
    // arguments.
    auto const kernel = [&](auto predict_fn) {
      dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
          predict_fn, data, model.nodes.ConstDeviceSpan(),
          predictions->DeviceSpan().subspan(batch_offset),
          model.tree_segments.ConstDeviceSpan(),
          model.tree_group.ConstDeviceSpan(),
          model.split_types.ConstDeviceSpan(),
          model.categories_tree_segments.ConstDeviceSpan(),
          model.categories_node_segments.ConstDeviceSpan(),
          model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
          num_features, num_rows, entry_start, use_shared, model.num_group,
          nan(""));
    };
    if (is_dense) {
      kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
    } else {
      kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
    }
  }
  // Predict one Ellpack (quantized, external-memory-capable) batch.
  void PredictInternal(EllpackDeviceAccessor const& batch,
                       DeviceModel const& model,
                       HostDeviceVector<bst_float>* out_preds,
                       size_t batch_offset) const {
    const uint32_t BLOCK_THREADS = 256;
    size_t num_rows = batch.n_rows;
    auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
    // Ellpack data lives in a compressed device buffer; row caching in shared
    // memory is not used here.
    bool use_shared = false;
    size_t entry_start = 0;
    dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
        PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
        model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
        model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
        model.split_types.ConstDeviceSpan(),
        model.categories_tree_segments.ConstDeviceSpan(),
        model.categories_node_segments.ConstDeviceSpan(),
        model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
        batch.NumFeatures(), num_rows, entry_start, use_shared,
        model.num_group, nan(""));
  }
  // Top-level dispatch: column-split, in-memory sparse pages, or Ellpack.
  void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
                             const gbm::GBTreeModel& model, size_t tree_begin,
                             size_t tree_end) const {
    if (tree_end - tree_begin == 0) {
      return;
    }
    out_preds->SetDevice(ctx_->gpu_id);
    DeviceModel d_model;
    d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);
    if (dmat->Info().IsColumnSplit()) {
      column_split_helper_.PredictBatch(dmat, out_preds, model, d_model);
      return;
    }
    if (dmat->PageExists<SparsePage>()) {
      size_t batch_offset = 0;
      for (auto &batch : dmat->GetBatches<SparsePage>()) {
        this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
                              out_preds, batch_offset, dmat->IsDense());
        // Output layout is row-major with num_output_group values per row.
        batch_offset += batch.Size() * model.learner_model_param->num_output_group;
      }
    } else {
      size_t batch_offset = 0;
      for (auto const& page : dmat->GetBatches<EllpackPage>(ctx_, BatchParam{})) {
        dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
        auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
        this->PredictInternal(
            page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
            d_model,
            out_preds,
            batch_offset);
        batch_offset += page.Impl()->n_rows;
      }
    }
  }

 public:
  explicit GPUPredictor(Context const* ctx)
      : Predictor::Predictor{ctx}, column_split_helper_{ctx} {}
  ~GPUPredictor() override {
    // Re-bind the device only when it is still visible, so teardown after
    // device changes does not trip an error.
    if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
      dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    }
  }
  // Predict [tree_begin, tree_end) for a whole DMatrix; tree_end == 0 means
  // "all trees".
  void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
                    const gbm::GBTreeModel& model, uint32_t tree_begin,
                    uint32_t tree_end = 0) const override {
    int device = ctx_->gpu_id;
    CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
    auto* out_preds = &predts->predictions;
    if (tree_end == 0) {
      tree_end = model.trees.size();
    }
    this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
  }
  // In-place prediction over a device adapter batch (CuPy array / cuDF frame)
  // without building a DMatrix.  `p_m` may be null; the data must already
  // reside on the current device.
  template <typename Adapter, typename Loader>
  void DispatchedInplacePredict(std::any const& x, std::shared_ptr<DMatrix> p_m,
                                const gbm::GBTreeModel& model, float missing,
                                PredictionCacheEntry* out_preds, uint32_t tree_begin,
                                uint32_t tree_end) const {
    uint32_t const output_groups = model.learner_model_param->num_output_group;
    auto m = std::any_cast<std::shared_ptr<Adapter>>(x);
    CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
        << "Number of columns in data must equal to trained model.";
    CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
        << "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
        << "but data is on: " << m->DeviceIdx();
    if (p_m) {
      p_m->Info().num_row_ = m->NumRows();
      this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
    } else {
      MetaInfo info;
      info.num_row_ = m->NumRows();
      this->InitOutPredictions(info, &(out_preds->predictions), model);
    }
    out_preds->predictions.SetDevice(m->DeviceIdx());
    const uint32_t BLOCK_THREADS = 128;
    auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
    auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
    size_t shared_memory_bytes =
        SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
    DeviceModel d_model;
    d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
    bool use_shared = shared_memory_bytes != 0;
    size_t entry_start = 0;
    dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
        PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
        d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
        d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
        d_model.split_types.ConstDeviceSpan(),
        d_model.categories_tree_segments.ConstDeviceSpan(),
        d_model.categories_node_segments.ConstDeviceSpan(),
        d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
        m->NumRows(), entry_start, use_shared, output_groups, missing);
  }
  // Returns false when the adapter type is not a device adapter so the caller
  // can fall back to the CPU predictor.
  bool InplacePredict(std::shared_ptr<DMatrix> p_m, const gbm::GBTreeModel& model, float missing,
                      PredictionCacheEntry* out_preds, uint32_t tree_begin,
                      unsigned tree_end) const override {
    auto proxy = dynamic_cast<data::DMatrixProxy*>(p_m.get());
    CHECK(proxy) << error::InplacePredictProxy();
    auto x = proxy->Adapter();
    if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
      this->DispatchedInplacePredict<data::CupyAdapter,
                                     DeviceAdapterLoader<data::CupyAdapterBatch>>(
          x, p_m, model, missing, out_preds, tree_begin, tree_end);
    } else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
      this->DispatchedInplacePredict<data::CudfAdapter,
                                     DeviceAdapterLoader<data::CudfAdapterBatch>>(
          x, p_m, model, missing, out_preds, tree_begin, tree_end);
    } else {
      return false;
    }
    return true;
  }
  // SHAP feature contributions via GPUTreeShap.  Output shape:
  // num_row * num_group * (num_feature + 1), the +1 column being bias.
  void PredictContribution(DMatrix* p_fmat,
                           HostDeviceVector<bst_float>* out_contribs,
                           const gbm::GBTreeModel& model, unsigned tree_end,
                           std::vector<bst_float> const* tree_weights,
                           bool approximate, int,
                           unsigned) const override {
    std::string not_implemented{"contribution is not implemented in GPU "
                                "predictor, use `cpu_predictor` instead."};
    if (approximate) {
      LOG(FATAL) << "Approximated " << not_implemented;
    }
    if (tree_weights != nullptr) {
      LOG(FATAL) << "Dart booster feature " << not_implemented;
    }
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    out_contribs->SetDevice(ctx_->gpu_id);
    if (tree_end == 0 || tree_end > model.trees.size()) {
      tree_end = static_cast<uint32_t>(model.trees.size());
    }
    const int ngroup = model.learner_model_param->num_output_group;
    CHECK_NE(ngroup, 0);
    // allocate space for (number of features + bias) times the number of rows
    size_t contributions_columns =
        model.learner_model_param->num_feature + 1;  // +1 for bias
    out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
                         model.learner_model_param->num_output_group);
    out_contribs->Fill(0.0f);
    auto phis = out_contribs->DeviceSpan();
    dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
        device_paths;
    DeviceModel d_model;
    d_model.Init(model, 0, tree_end, ctx_->gpu_id);
    dh::device_vector<uint32_t> categories;
    ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
    for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
      batch.data.SetDevice(ctx_->gpu_id);
      batch.offset.SetDevice(ctx_->gpu_id);
      SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                       model.learner_model_param->num_feature);
      auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
      gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
          X, device_paths.begin(), device_paths.end(), ngroup, begin,
          dh::tend(phis));
    }
    // Add the base margin term to last column
    p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
    const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
    auto base_score = model.learner_model_param->BaseScore(ctx_);
    dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
                [=] __device__(size_t idx) {
                  phis[(idx + 1) * contributions_columns - 1] +=
                      margin.empty() ? base_score(0) : margin[idx];
                });
  }
  // SHAP interaction contributions.  Output shape:
  // num_row * num_group * (num_feature + 1)^2.
  void PredictInteractionContributions(DMatrix* p_fmat,
                                       HostDeviceVector<bst_float>* out_contribs,
                                       const gbm::GBTreeModel& model,
                                       unsigned tree_end,
                                       std::vector<bst_float> const* tree_weights,
                                       bool approximate) const override {
    std::string not_implemented{"contribution is not implemented in GPU "
                                "predictor, use `cpu_predictor` instead."};
    if (approximate) {
      LOG(FATAL) << "Approximated " << not_implemented;
    }
    if (tree_weights != nullptr) {
      LOG(FATAL) << "Dart booster feature " << not_implemented;
    }
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    out_contribs->SetDevice(ctx_->gpu_id);
    if (tree_end == 0 || tree_end > model.trees.size()) {
      tree_end = static_cast<uint32_t>(model.trees.size());
    }
    const int ngroup = model.learner_model_param->num_output_group;
    CHECK_NE(ngroup, 0);
    // allocate space for (number of features + bias) times the number of rows
    size_t contributions_columns =
        model.learner_model_param->num_feature + 1;  // +1 for bias
    out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
                         contributions_columns *
                         model.learner_model_param->num_output_group);
    out_contribs->Fill(0.0f);
    auto phis = out_contribs->DeviceSpan();
    dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
        device_paths;
    DeviceModel d_model;
    d_model.Init(model, 0, tree_end, ctx_->gpu_id);
    dh::device_vector<uint32_t> categories;
    ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
    for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
      batch.data.SetDevice(ctx_->gpu_id);
      batch.offset.SetDevice(ctx_->gpu_id);
      SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                       model.learner_model_param->num_feature);
      auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
      gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
          X, device_paths.begin(), device_paths.end(), ngroup, begin,
          dh::tend(phis));
    }
    // Add the base margin term to last column
    p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
    const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
    auto base_score = model.learner_model_param->BaseScore(ctx_);
    size_t n_features = model.learner_model_param->num_feature;
    dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
                [=] __device__(size_t idx) {
                  size_t group = idx % ngroup;
                  size_t row_idx = idx / ngroup;
                  phis[gpu_treeshap::IndexPhiInteractions(row_idx, ngroup, group, n_features,
                                                          n_features, n_features)] +=
                      margin.empty() ? base_score(0) : margin[idx];
                });
  }
  // Single-instance prediction is CPU-only.
  void PredictInstance(const SparsePage::Inst&,
                       std::vector<bst_float>*,
                       const gbm::GBTreeModel&, unsigned, bool) const override {
    LOG(FATAL) << "[Internal error]: " << __func__
               << " is not implemented in GPU Predictor.";
  }
  // Predict the leaf index each row lands on, per tree.  Output layout is
  // row-major: predictions[row * tree_end + tree].
  void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
                   const gbm::GBTreeModel &model,
                   unsigned tree_end) const override {
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
    const MetaInfo& info = p_fmat->Info();
    constexpr uint32_t kBlockThreads = 128;
    size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
        info.num_col_, max_shared_memory_bytes);
    bool use_shared = shared_memory_bytes != 0;
    bst_feature_t num_features = info.num_col_;
    bst_row_t num_rows = info.num_row_;
    size_t entry_start = 0;
    if (tree_end == 0 || tree_end > model.trees.size()) {
      tree_end = static_cast<uint32_t>(model.trees.size());
    }
    predictions->SetDevice(ctx_->gpu_id);
    predictions->Resize(num_rows * tree_end);
    DeviceModel d_model;
    d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
    if (p_fmat->PageExists<SparsePage>()) {
      // FIX: batch_offset must persist across batches.  It was previously
      // re-initialised to 0 inside the loop, so with multiple batches every
      // batch overwrote the predictions of the previous one.
      bst_row_t batch_offset = 0;
      for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
        batch.data.SetDevice(ctx_->gpu_id);
        batch.offset.SetDevice(ctx_->gpu_id);
        SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
                            model.learner_model_param->num_feature};
        size_t batch_rows = batch.Size();
        auto grid =
            static_cast<uint32_t>(common::DivRoundUp(batch_rows, kBlockThreads));
        dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
            PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
            d_model.nodes.ConstDeviceSpan(),
            predictions->DeviceSpan().subspan(batch_offset),
            d_model.tree_segments.ConstDeviceSpan(),
            d_model.split_types.ConstDeviceSpan(),
            d_model.categories_tree_segments.ConstDeviceSpan(),
            d_model.categories_node_segments.ConstDeviceSpan(),
            d_model.categories.ConstDeviceSpan(),
            d_model.tree_beg_, d_model.tree_end_, num_features, batch_rows,
            entry_start, use_shared, nan(""));
        batch_offset += batch.Size();
      }
    } else {
      // Same fix as above for the external-memory (Ellpack) path.
      bst_row_t batch_offset = 0;
      for (auto const& batch : p_fmat->GetBatches<EllpackPage>(ctx_, BatchParam{})) {
        EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
        size_t batch_rows = batch.Size();
        auto grid =
            static_cast<uint32_t>(common::DivRoundUp(batch_rows, kBlockThreads));
        dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
            PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
            d_model.nodes.ConstDeviceSpan(),
            predictions->DeviceSpan().subspan(batch_offset),
            d_model.tree_segments.ConstDeviceSpan(),
            d_model.split_types.ConstDeviceSpan(),
            d_model.categories_tree_segments.ConstDeviceSpan(),
            d_model.categories_node_segments.ConstDeviceSpan(),
            d_model.categories.ConstDeviceSpan(),
            d_model.tree_beg_, d_model.tree_end_, num_features, batch_rows,
            entry_start, use_shared, nan(""));
        batch_offset += batch.Size();
      }
    }
  }
  void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
    Predictor::Configure(cfg);
  }

 private:
  /*! \brief Reconfigure the device when GPU is changed. */
  static size_t ConfigureDevice(int device) {
    if (device >= 0) {
      return dh::MaxSharedMemory(device);
    }
    return 0;
  }
  // Handles the column-split (feature-distributed) prediction path.
  ColumnSplitHelper column_split_helper_;
};
// Register this implementation under the name used by the `predictor`
// training parameter ("gpu_predictor").
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](Context const* ctx) { return new GPUPredictor(ctx); });
} // namespace xgboost::predictor
|
c17f9a7b191fc05f78f3969791145ce5ea43b4bb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cal_pi.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *sum = NULL;
hipMalloc(&sum, XSIZE*YSIZE);
int nbin = 1;
double step = 1;
int nthreads = 1;
int nblocks = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cal_pi), dim3(gridBlock),dim3(threadBlock), 0, 0, sum,nbin,step,nthreads,nblocks);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cal_pi), dim3(gridBlock),dim3(threadBlock), 0, 0, sum,nbin,step,nthreads,nblocks);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cal_pi), dim3(gridBlock),dim3(threadBlock), 0, 0, sum,nbin,step,nthreads,nblocks);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c17f9a7b191fc05f78f3969791145ce5ea43b4bb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cal_pi.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// (BLOCKX, BLOCKY) thread-block shapes swept by the benchmark in main().
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// (XSIZE, YSIZE) problem sizes; main() must not index past these 7 entries.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated launch-configuration benchmark for the cal_pi kernel: for
// each requested problem size and each of 20 block shapes, time 1000 kernel
// launches and print "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
int main(int argc, char **argv) {
  // The original dereferenced argv[1] unconditionally.
  if (argc < 2) {
    fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
    return 1;
  }
  cudaSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  // matrices_ only has 7 entries; clamp instead of reading out of bounds.
  if (matrix_len > 7) matrix_len = 7;
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      double *sum = NULL;
      // FIX: allocate bytes, not element counts.  The original passed raw
      // XSIZE*YSIZE, under-allocating by sizeof(double) and letting the
      // kernel write out of bounds.
      cudaMalloc(&sum, (size_t)XSIZE * YSIZE * sizeof(double));
      int nbin = 1;
      double step = 1;
      int nthreads = 1;
      int nblocks = 1;
      // Round the problem size up to a multiple of the block dimensions.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force lazy context creation before timing
      cal_pi<<<gridBlock,threadBlock>>>(sum,nbin,step,nthreads,nblocks);
      cudaDeviceSynchronize();
      // Warm-up launches, excluded from timing.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        cal_pi<<<gridBlock,threadBlock>>>(sum,nbin,step,nthreads,nblocks);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        cal_pi<<<gridBlock,threadBlock>>>(sum,nbin,step,nthreads,nblocks);
      }
      // NOTE(review): launches are asynchronous; with no synchronize here the
      // measured time is mostly enqueue cost.  Kept as-is to preserve the
      // original benchmark's semantics and output.
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      cudaFree(sum);  // FIX: the original leaked this allocation every iteration
    }
  }
}
d6fb6a2e0ae07f04a8e7c560d58627a942c6ea97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.141592
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// One ray integral (one Radon-transform sample) per thread.
// Thread (i, j) handles angle index i (theta = i*dtheta, guarded by i < wdR)
// and ray offset j (t = -1 + j*dt, guarded by j < nrays); the result goes to
// dev_r[j*wdR + i].  dev_f is a wdF x wdF image sampled on
// [-1/sqrt(2), 1/sqrt(2)]^2.  The ray is marched along Y when the line is
// closer to vertical (stheta < 1/sqrt(2)) and along X otherwise, with linear
// interpolation between neighbouring samples; the 1/|cos| (resp. 1/|sin|)
// factor converts the per-row (per-column) step into arc length.
__global__ void raysum(float *dev_f , float *dev_r , int wdF, int wdR, float dtheta, float dt, int nrays){
float ini, delta, x, y, cumsum, tol, ctheta, stheta, ttheta, theta, t;
int X, Y, i, j;
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
if ((i<wdR) && (j < nrays) ){
theta = i*dtheta;
t = -1.0 + j*dt;
tol = 1.0/sqrtf(2);
ini = -tol;
delta = (float) sqrtf(2)/(wdF-1);
ctheta = cosf(theta);
stheta = sinf(theta);
ttheta = tanf(theta);
if(stheta < tol){
// March down the rows: for each y, solve x from x*cos + y*sin = t.
cumsum = 0;
for(Y = 0; Y < wdF; Y++){
y = ini + Y*delta;
x = (t/ctheta - y*ttheta);
X = (int) floorf((x - ini)/delta);
// Linear interpolation between columns X and X+1 when inside the image.
if(X > -1 && X < wdF-1){
cumsum += (dev_f[Y*wdF + (X+1)] - dev_f[Y*wdF + X])*(x - (ini + X*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(ctheta);
}
else{
// March across the columns: for each x, solve y from x*cos + y*sin = t.
cumsum = 0;
for(X = 0; X < wdF; X++){
x = ini + X*delta;
y = (t/stheta - x/ttheta);
Y = (int) floorf((y - ini)/delta);
// Linear interpolation between rows Y and Y+1 when inside the image.
if(Y > -1 && Y < wdF-1){
cumsum += (dev_f[(Y+1)*wdF + X] - dev_f[Y*wdF + X])*(y - (ini + Y*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(stheta);
}
}
}
// Usage: <prog> <image-file> <sizeImage> <nrays> <nangles>
// Reads a sizeImage x sizeImage float image from the text file, computes its
// Radon transform on the GPU with the raysum kernel, and writes the sinogram
// (both axes reversed) to stdout.
int main(int argc, char *argv[]) {
  int i, j;
  float dt, dtheta;
  //hipEvent_t start, stop;
  //hipEventCreate(&start);
  //hipEventCreate(&stop);
  //float milliseconds = 0;
  // FIX: the original indexed argv[1..4] without checking argc.
  if (argc < 5) {
    fprintf(stderr, "usage: %s <image-file> <sizeImage> <nrays> <nangles>\n", argv[0]);
    return 1;
  }
  int sizeImage = atoi(argv[2]);
  int nrays = atoi(argv[3]);
  int nangles = atoi(argv[4]);
  // (nrays-1) and (nangles-1) are used as divisors below.
  if (sizeImage < 2 || nrays < 2 || nangles < 2) {
    fprintf(stderr, "sizeImage, nrays and nangles must all be >= 2\n");
    return 1;
  }
  int wdf = sizeImage;
  int wdr = nangles;
  FILE *fp = fopen(argv[1], "r");
  if (fp == NULL) {  // FIX: the original dereferenced a NULL stream on bad paths
    perror(argv[1]);
    return 1;
  }
  float *f;
  float *radon;
  float *dev_r = NULL;
  float *dev_f = NULL;
  unsigned int grid1, grid2;
  // 16x16 thread blocks; grid covers nangles columns by nrays rows.
  grid1 = (unsigned int) ceilf(((float)(nangles)/16));
  grid2 = (unsigned int) ceilf(((float)(nrays)/16));
  fprintf(stderr, "%d %d\n", grid1, grid2);
  dim3 grid(grid1, grid2, 1);
  dim3 blocks(16, 16, 1);
  CUDA_CHECK_RETURN(hipMalloc((void**) &dev_f, sizeof(float)*sizeImage*sizeImage));
  CUDA_CHECK_RETURN(hipMalloc((void **)&dev_r , nangles*nrays*sizeof(float) ) );
  radon = (float *)malloc(nangles*nrays*sizeof(float));
  f = (float *)malloc(sizeImage*sizeImage*sizeof(float));
  if (radon == NULL || f == NULL) {  // FIX: malloc results were unchecked
    fprintf(stderr, "out of host memory\n");
    return 1;
  }
  for (i = 0; i < sizeImage*sizeImage; i++) {
    // FIX: the original ignored fscanf failures and then used garbage pixels.
    if (fscanf(fp, "%f", &f[i]) != 1) {
      fprintf(stderr, "failed to read pixel %d from %s\n", i, argv[1]);
      return 1;
    }
  }
  CUDA_CHECK_RETURN(hipMemcpy (dev_f , f , sizeImage*sizeImage*sizeof(float) , hipMemcpyHostToDevice));
  //hipEventRecord(start);
  dt = 2.0/(nrays-1);
  dtheta = PI/(nangles-1);
  hipLaunchKernelGGL(( raysum), dim3(grid), dim3(blocks), 0, 0, dev_f, dev_r, wdf, wdr, dtheta, dt, nrays);
  //hipEventRecord(stop);
  CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
  CUDA_CHECK_RETURN(hipGetLastError());
  CUDA_CHECK_RETURN(hipMemcpy (radon , dev_r , nangles*nrays*sizeof(float) , hipMemcpyDeviceToHost) );
  //hipEventSynchronize(stop);
  //hipEventElapsedTime(&milliseconds, start, stop);
  //printf("%f\n", milliseconds);
  // Emit the sinogram with both axes reversed, matching the original layout.
  for ( i = 0; i < nrays ; i++){
    for(j=0 ; j<nangles; j++){
      fprintf(stdout, "%f ", radon[(nrays-1-i)*wdr + (nangles-1-j)]);
    }
    fprintf(stdout, "\n");
  }
  CUDA_CHECK_RETURN(hipFree((void*) dev_r));
  CUDA_CHECK_RETURN(hipFree((void*) dev_f));
  CUDA_CHECK_RETURN(hipDeviceReset());
  free(radon);
  free(f);
  fclose(fp);
  return 0;
}
| d6fb6a2e0ae07f04a8e7c560d58627a942c6ea97.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.141592
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// One ray integral (one Radon-transform sample) per thread.
// Thread (i, j) handles angle index i (theta = i*dtheta, guarded by i < wdR)
// and ray offset j (t = -1 + j*dt, guarded by j < nrays); the result goes to
// dev_r[j*wdR + i].  dev_f is a wdF x wdF image sampled on
// [-1/sqrt(2), 1/sqrt(2)]^2.  The ray is marched along Y when the line is
// closer to vertical (stheta < 1/sqrt(2)) and along X otherwise, with linear
// interpolation between neighbouring samples; the 1/|cos| (resp. 1/|sin|)
// factor converts the per-row (per-column) step into arc length.
__global__ void raysum(float *dev_f , float *dev_r , int wdF, int wdR, float dtheta, float dt, int nrays){
float ini, delta, x, y, cumsum, tol, ctheta, stheta, ttheta, theta, t;
int X, Y, i, j;
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
if ((i<wdR) && (j < nrays) ){
theta = i*dtheta;
t = -1.0 + j*dt;
tol = 1.0/sqrtf(2);
ini = -tol;
delta = (float) sqrtf(2)/(wdF-1);
ctheta = cosf(theta);
stheta = sinf(theta);
ttheta = tanf(theta);
if(stheta < tol){
// March down the rows: for each y, solve x from x*cos + y*sin = t.
cumsum = 0;
for(Y = 0; Y < wdF; Y++){
y = ini + Y*delta;
x = (t/ctheta - y*ttheta);
X = (int) floorf((x - ini)/delta);
// Linear interpolation between columns X and X+1 when inside the image.
if(X > -1 && X < wdF-1){
cumsum += (dev_f[Y*wdF + (X+1)] - dev_f[Y*wdF + X])*(x - (ini + X*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(ctheta);
}
else{
// March across the columns: for each x, solve y from x*cos + y*sin = t.
cumsum = 0;
for(X = 0; X < wdF; X++){
x = ini + X*delta;
y = (t/stheta - x/ttheta);
Y = (int) floorf((y - ini)/delta);
// Linear interpolation between rows Y and Y+1 when inside the image.
if(Y > -1 && Y < wdF-1){
cumsum += (dev_f[(Y+1)*wdF + X] - dev_f[Y*wdF + X])*(y - (ini + Y*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(stheta);
}
}
}
int main(int argc, char *argv[]) {
int i, j;
float dt, dtheta;
//cudaEvent_t start, stop;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//float milliseconds = 0;
int sizeImage = atoi(argv[2]);
int nrays = atoi(argv[3]);
int nangles = atoi(argv[4]);
int wdf = sizeImage;
int wdr = nangles;
FILE *fp = fopen(argv[1], "r");
float *f;
float *radon;
float *dev_r = NULL;
float *dev_f = NULL;
unsigned int grid1, grid2;
grid1 = (unsigned int) ceilf(((float)(nangles)/16));
grid2 = (unsigned int) ceilf(((float)(nrays)/16));
fprintf(stderr, "%d %d\n", grid1, grid2);
dim3 grid(grid1, grid2, 1);
dim3 blocks(16, 16, 1);
CUDA_CHECK_RETURN(cudaMalloc((void**) &dev_f, sizeof(float)*sizeImage*sizeImage));
CUDA_CHECK_RETURN(cudaMalloc((void **)&dev_r , nangles*nrays*sizeof(float) ) );
radon = (float *)malloc(nangles*nrays*sizeof(float));
f = (float *)malloc(sizeImage*sizeImage*sizeof(float));
for (i = 0; i < sizeImage*sizeImage; i++)
fscanf(fp, "%f", &f[i]);
CUDA_CHECK_RETURN(cudaMemcpy (dev_f , f , sizeImage*sizeImage*sizeof(float) , cudaMemcpyHostToDevice));
//cudaEventRecord(start);
dt = 2.0/(nrays-1);
dtheta = PI/(nangles-1);
raysum<<<grid, blocks>>>(dev_f, dev_r, wdf, wdr, dtheta, dt, nrays);
//cudaEventRecord(stop);
CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaMemcpy (radon , dev_r , nangles*nrays*sizeof(float) , cudaMemcpyDeviceToHost) );
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&milliseconds, start, stop);
//printf("%f\n", milliseconds);
for ( i = 0; i < nrays ; i++){
for(j=0 ; j<nangles; j++){
fprintf(stdout, "%f ", radon[(nrays-1-i)*wdr + (nangles-1-j)]);
}
fprintf(stdout, "\n");
}
CUDA_CHECK_RETURN(cudaFree((void*) dev_r));
CUDA_CHECK_RETURN(cudaFree((void*) dev_f));
CUDA_CHECK_RETURN(cudaDeviceReset());
free(radon);
free(f);
fclose(fp);
return 0;
}
|
91ef34881f94e24ba3be91b06f1a95006d2e1155.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelPowerTwo(const float *a, const float *b, const unsigned int length, float *a2, float *b2)
{
//TODO:
} | 91ef34881f94e24ba3be91b06f1a95006d2e1155.cu | #include "includes.h"
__global__ void kernelPowerTwo(const float *a, const float *b, const unsigned int length, float *a2, float *b2)
{
//TODO:
} |
2a9ce3d7f1cf86e00cf853f3f44f062290b34174.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This version of my sudoku solver will make use of cuda to attemp to gain speedups
*/
#include <iostream>
#include <fstream>
//#include <chrono>
#define boardSize 81
#define sideSize 9
using namespace std;
struct Board{
int values[81];
bool isFixedValue[81];
bool isPossibleValue[81][9];
//int coordinates;
};
void getFileName(char fileName[]){
cout << "Please give the file location of your sudoku board."<<endl;
cin.getline(fileName, 255);
return;
}
bool getBoard(char fileName[], Board &mainBoard){
ifstream file(fileName);//input can only occur on host this can't be parallelized
if(file.fail())
return false;
char temp = '0';
for (int i = 0; i < boardSize; i++){
file >> temp;
mainBoard.values[i] = (int)((int)temp - (int)'0');
//cout << mainBoard.values[i] << ",";
if(mainBoard.values[i] > 0){
mainBoard.isFixedValue[i] = true;
}
else{
mainBoard.isFixedValue[i] = false;
mainBoard.values[i] = (mainBoard.values[i] / -1);
}
}
file.close();
return true;
}
__global__ void rowCheckKernel(const int board[], const int row, const int value, bool *hasDuplicates)
{
int i = (row*sideSize) + threadIdx.x;
if(value == board[i])
*hasDuplicates = true;//for some reason this doesn't want to pass back the correct value, always the same one
}
bool anyDuplicates(int board[], int coordinates, int value)
{
int row = coordinates / sideSize;
int column = coordinates % sideSize;
//dim3 grid(sideSize);
//bool *hasDuplicates = false;
//rowCheckKernel<<<1,grid>>>(board,row,value,hasDuplicates);
//bool test = &hasDuplicates;
/*if(test){
return test;
}*/
for(int i = row * sideSize; i < ((row*sideSize)+sideSize); i++)
{
if(value == board[i]){
return true;
}
}
for(int i = column; i < boardSize; i+=sideSize)
{
if(value == board[i])
return true;
}
while(column%3 != 0)
column--;
while(row%3 != 0)
row--;
for(int r = row; r < (row+3); r++)
{
for(int c = column; c < (column+3); c++)
{
if(value == board[((r*9)+c)])
return true;
}
}
return false;
}
__global__ void validateBoardKernel(Board board, bool &isValid)
{
int tempValue = board.values[threadIdx.x];
board.values[threadIdx.x] = 0;
if(tempValue != 0 /*&& anyDuplicates(board.values, threadIdx.x, tempValue)*/){
isValid = false;
}
}
bool validateBoard(Board board){//when I attempted to paralellize this it would
for(int i = 0; i < boardSize; i++){
int tempValue = board.values[i];
board.values[i] = 0;
if(tempValue != 0 && anyDuplicates(board.values, i, tempValue)){
cout<<"ERROR: Duplicate value '"
<< tempValue;
return false;
}
board.values[i] = tempValue;
}
return true;
}
void displayBoard(Board board)
{//this cannot be implemented in parallel I don't think, everything I found said input output had to occur on host
ofstream results("resultsParallel.txt");
for(int i = 0; i < boardSize; i++){
results<<board.values[i] <<",";
if(i%sideSize == 8)
results<<endl;
}
results.close();
}
__global__ void isSolvedKernel(const Board board, bool &solved)
{
if(board.values[threadIdx.x] == 0)
solved = true;
}
bool isSolved(Board board)
{
bool solved = true;
dim3 grid(boardSize);
hipLaunchKernelGGL(( isSolvedKernel), dim3(1), dim3(grid), 0, 0, board,solved);
return solved;
}
bool canChange(Board mainBoard, int coordinates, int value)
{//this function has no behavior worth parallelizing
if(value==0)
return true;
if(mainBoard.isFixedValue[coordinates])
return false;
if(anyDuplicates(mainBoard.values, coordinates, value))
return false;
return true;
}
__global__ void checkPossiblesKernel(Board &newBoard, bool &noChanges)
{
int possibles = 0;
int value;
if(!newBoard.isFixedValue[threadIdx.x]){
for(int guess = 1; guess <= sideSize; guess++){
if(/*canChange(newBoard, threadIdx.x, guess)can't use host function from global*/guess==0 || !newBoard.isFixedValue[threadIdx.x]){
value = guess;
newBoard.isPossibleValue[threadIdx.x][guess-1] = true;
possibles++;
}
else
newBoard.isPossibleValue[threadIdx.x][guess-1] = false;
}
}
if(possibles==1){
newBoard.values[threadIdx.x] = value;
newBoard.isFixedValue[threadIdx.x] = true;
noChanges = false;
}
}
bool checkPossibles(Board &newBoard)
{
bool noChanges;
do{
noChanges = true;
for(int i = 0; i < boardSize; i++){
int possibles = 0;
int value;
if(!newBoard.isFixedValue[i]){
for(int guess = 1; guess <= 9; guess++){
if(canChange(newBoard, i, guess)){
value = guess;
newBoard.isPossibleValue[i][guess-1] = true;
possibles++;
}
else
newBoard.isPossibleValue[i][guess-1] = false;
}
}
if(possibles==1){
newBoard.values[i] = value;
newBoard.isFixedValue[i] = true;
noChanges = false;
}
}
}while(noChanges == false);
if(isSolved(newBoard))
return true;
return false;
}
/*dfs is very difficult to do in parallel
I couldn't figure out how to do it
__global__ void dfs(Board &board)
{
}
Board parallelBrute(Board board)
{
}
*/
Board recursiveBrute(Board board, int startPosition)
{
while(startPosition < boardSize && board.isFixedValue[startPosition] && board.values[startPosition] != 0)
startPosition++;
if(startPosition >= boardSize)
return board;
for(int guess = 1; guess <= 9; guess++){
Board tempBoard = board;
if(board.isPossibleValue[startPosition][guess-1] && canChange(board,startPosition,guess)){
board.values[startPosition] = guess;
tempBoard = recursiveBrute(board, startPosition+1);
}
if(isSolved(tempBoard))
return tempBoard;
}
board.values[startPosition] = 0;
return board;
}
__global__ void solveKernel(Board &newBoard, bool &noChanges)
{
for(int guess = 1; guess <= sideSize; guess++){
int total = 0;
for(int iRow = threadIdx.x; iRow < (threadIdx.x+sideSize); iRow++){
if(!newBoard.isFixedValue[iRow])
total += newBoard.isPossibleValue[iRow][guess-1];
}
if(total == 1){
for(int iRow = threadIdx.x; iRow <(threadIdx.x + sideSize); iRow++){
if(newBoard.isPossibleValue[iRow][guess-1] && !newBoard.isFixedValue[iRow]){
newBoard.values[iRow] = guess;
newBoard.isFixedValue[iRow] = true;
noChanges = false;
}
}
}
}
}
void solve(Board &newBoard)
{
bool noChanges;
do
{
noChanges = true;
if(checkPossibles(newBoard))
return;
for(int i = 0; i < boardSize; i += sideSize){
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iRow = i; iRow < (i+9); iRow++){
if(!newBoard.isFixedValue[iRow])
total += newBoard.isPossibleValue[iRow][guess-1];
}
if(total == 1){
for(int iRow = i; iRow <(i+9); iRow++){
if(newBoard.isPossibleValue[iRow][guess-1] && !newBoard.isFixedValue[iRow]){
newBoard.values[iRow] = guess;
newBoard.isFixedValue[iRow] = true;
noChanges = false;
}
}
}
}
}
if(checkPossibles(newBoard))
return;
for(int i = 0; i < sideSize; i++){
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iColumn = i; iColumn < boardSize; iColumn += sideSize){
if(!newBoard.isFixedValue[iColumn]){
total += newBoard.isPossibleValue[iColumn][guess-1];
}
}
if(total == 1){
for(int iColumn = i; iColumn < boardSize; iColumn += sideSize){
if(newBoard.isPossibleValue[iColumn][guess-1] && !newBoard.isFixedValue[iColumn]){
newBoard.values[iColumn] = guess;
newBoard.isFixedValue[iColumn] = true;
noChanges = false;
}
}
}
}
}
if(checkPossibles(newBoard))
return;
for(int r = 0; r < boardSize; r+= sideSize*3){
for(int c = 0; c < sideSize; c += 3){
int i = r+c;
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iBlockR = i; iBlockR < (i+sideSize*3); iBlockR += sideSize){
for(int iBlockC = iBlockR; iBlockC < (iBlockR + 3); iBlockC++){
if(!newBoard.isFixedValue[iBlockC])
total += newBoard.isPossibleValue[iBlockC][guess-1];
}
}
if(total == 1){
for(int iBlockR = i; iBlockR < (i+sideSize*3); iBlockR += sideSize){
for(int iBlockC = iBlockR; iBlockC < (iBlockR + 3); iBlockC++){
if(newBoard.isPossibleValue[iBlockC][guess-1] && !newBoard.isFixedValue[iBlockC]){
newBoard.values[iBlockC] = guess;
newBoard.isFixedValue[iBlockC] = true;
noChanges = false;
}
}
}
}
}
}
}
if(checkPossibles(newBoard))
return;
}while(noChanges == false);
if(!isSolved(newBoard))
newBoard = recursiveBrute(newBoard, 0);
}
int main(int argc, char *argv[]){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
char fileName[256];
Board mainBoard;
getFileName(fileName);
if(!getBoard(fileName,mainBoard))
cout <<"Error with filename";
else if(!validateBoard(mainBoard)){
displayBoard(mainBoard);
return 0;
}
solve(mainBoard);
displayBoard(mainBoard);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout <<"Solve time took " << milliseconds << "ms"<<endl;
return 0;
}
| 2a9ce3d7f1cf86e00cf853f3f44f062290b34174.cu | /*
This version of my sudoku solver will make use of cuda to attemp to gain speedups
*/
#include <iostream>
#include <fstream>
//#include <chrono>
#define boardSize 81
#define sideSize 9
using namespace std;
struct Board{
int values[81];
bool isFixedValue[81];
bool isPossibleValue[81][9];
//int coordinates;
};
void getFileName(char fileName[]){
cout << "Please give the file location of your sudoku board."<<endl;
cin.getline(fileName, 255);
return;
}
bool getBoard(char fileName[], Board &mainBoard){
ifstream file(fileName);//input can only occur on host this can't be parallelized
if(file.fail())
return false;
char temp = '0';
for (int i = 0; i < boardSize; i++){
file >> temp;
mainBoard.values[i] = (int)((int)temp - (int)'0');
//cout << mainBoard.values[i] << ",";
if(mainBoard.values[i] > 0){
mainBoard.isFixedValue[i] = true;
}
else{
mainBoard.isFixedValue[i] = false;
mainBoard.values[i] = (mainBoard.values[i] / -1);
}
}
file.close();
return true;
}
__global__ void rowCheckKernel(const int board[], const int row, const int value, bool *hasDuplicates)
{
int i = (row*sideSize) + threadIdx.x;
if(value == board[i])
*hasDuplicates = true;//for some reason this doesn't want to pass back the correct value, always the same one
}
bool anyDuplicates(int board[], int coordinates, int value)
{
int row = coordinates / sideSize;
int column = coordinates % sideSize;
//dim3 grid(sideSize);
//bool *hasDuplicates = false;
//rowCheckKernel<<<1,grid>>>(board,row,value,hasDuplicates);
//bool test = &hasDuplicates;
/*if(test){
return test;
}*/
for(int i = row * sideSize; i < ((row*sideSize)+sideSize); i++)
{
if(value == board[i]){
return true;
}
}
for(int i = column; i < boardSize; i+=sideSize)
{
if(value == board[i])
return true;
}
while(column%3 != 0)
column--;
while(row%3 != 0)
row--;
for(int r = row; r < (row+3); r++)
{
for(int c = column; c < (column+3); c++)
{
if(value == board[((r*9)+c)])
return true;
}
}
return false;
}
__global__ void validateBoardKernel(Board board, bool &isValid)
{
int tempValue = board.values[threadIdx.x];
board.values[threadIdx.x] = 0;
if(tempValue != 0 /*&& anyDuplicates(board.values, threadIdx.x, tempValue)*/){
isValid = false;
}
}
bool validateBoard(Board board){//when I attempted to paralellize this it would
for(int i = 0; i < boardSize; i++){
int tempValue = board.values[i];
board.values[i] = 0;
if(tempValue != 0 && anyDuplicates(board.values, i, tempValue)){
cout<<"ERROR: Duplicate value '"
<< tempValue;
return false;
}
board.values[i] = tempValue;
}
return true;
}
void displayBoard(Board board)
{//this cannot be implemented in parallel I don't think, everything I found said input output had to occur on host
ofstream results("resultsParallel.txt");
for(int i = 0; i < boardSize; i++){
results<<board.values[i] <<",";
if(i%sideSize == 8)
results<<endl;
}
results.close();
}
__global__ void isSolvedKernel(const Board board, bool &solved)
{
if(board.values[threadIdx.x] == 0)
solved = true;
}
bool isSolved(Board board)
{
bool solved = true;
dim3 grid(boardSize);
isSolvedKernel<<<1, grid>>>(board,solved);
return solved;
}
bool canChange(Board mainBoard, int coordinates, int value)
{//this function has no behavior worth parallelizing
if(value==0)
return true;
if(mainBoard.isFixedValue[coordinates])
return false;
if(anyDuplicates(mainBoard.values, coordinates, value))
return false;
return true;
}
__global__ void checkPossiblesKernel(Board &newBoard, bool &noChanges)
{
int possibles = 0;
int value;
if(!newBoard.isFixedValue[threadIdx.x]){
for(int guess = 1; guess <= sideSize; guess++){
if(/*canChange(newBoard, threadIdx.x, guess)can't use host function from global*/guess==0 || !newBoard.isFixedValue[threadIdx.x]){
value = guess;
newBoard.isPossibleValue[threadIdx.x][guess-1] = true;
possibles++;
}
else
newBoard.isPossibleValue[threadIdx.x][guess-1] = false;
}
}
if(possibles==1){
newBoard.values[threadIdx.x] = value;
newBoard.isFixedValue[threadIdx.x] = true;
noChanges = false;
}
}
bool checkPossibles(Board &newBoard)
{
bool noChanges;
do{
noChanges = true;
for(int i = 0; i < boardSize; i++){
int possibles = 0;
int value;
if(!newBoard.isFixedValue[i]){
for(int guess = 1; guess <= 9; guess++){
if(canChange(newBoard, i, guess)){
value = guess;
newBoard.isPossibleValue[i][guess-1] = true;
possibles++;
}
else
newBoard.isPossibleValue[i][guess-1] = false;
}
}
if(possibles==1){
newBoard.values[i] = value;
newBoard.isFixedValue[i] = true;
noChanges = false;
}
}
}while(noChanges == false);
if(isSolved(newBoard))
return true;
return false;
}
/*dfs is very difficult to do in parallel
I couldn't figure out how to do it
__global__ void dfs(Board &board)
{
}
Board parallelBrute(Board board)
{
}
*/
Board recursiveBrute(Board board, int startPosition)
{
while(startPosition < boardSize && board.isFixedValue[startPosition] && board.values[startPosition] != 0)
startPosition++;
if(startPosition >= boardSize)
return board;
for(int guess = 1; guess <= 9; guess++){
Board tempBoard = board;
if(board.isPossibleValue[startPosition][guess-1] && canChange(board,startPosition,guess)){
board.values[startPosition] = guess;
tempBoard = recursiveBrute(board, startPosition+1);
}
if(isSolved(tempBoard))
return tempBoard;
}
board.values[startPosition] = 0;
return board;
}
__global__ void solveKernel(Board &newBoard, bool &noChanges)
{
for(int guess = 1; guess <= sideSize; guess++){
int total = 0;
for(int iRow = threadIdx.x; iRow < (threadIdx.x+sideSize); iRow++){
if(!newBoard.isFixedValue[iRow])
total += newBoard.isPossibleValue[iRow][guess-1];
}
if(total == 1){
for(int iRow = threadIdx.x; iRow <(threadIdx.x + sideSize); iRow++){
if(newBoard.isPossibleValue[iRow][guess-1] && !newBoard.isFixedValue[iRow]){
newBoard.values[iRow] = guess;
newBoard.isFixedValue[iRow] = true;
noChanges = false;
}
}
}
}
}
void solve(Board &newBoard)
{
bool noChanges;
do
{
noChanges = true;
if(checkPossibles(newBoard))
return;
for(int i = 0; i < boardSize; i += sideSize){
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iRow = i; iRow < (i+9); iRow++){
if(!newBoard.isFixedValue[iRow])
total += newBoard.isPossibleValue[iRow][guess-1];
}
if(total == 1){
for(int iRow = i; iRow <(i+9); iRow++){
if(newBoard.isPossibleValue[iRow][guess-1] && !newBoard.isFixedValue[iRow]){
newBoard.values[iRow] = guess;
newBoard.isFixedValue[iRow] = true;
noChanges = false;
}
}
}
}
}
if(checkPossibles(newBoard))
return;
for(int i = 0; i < sideSize; i++){
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iColumn = i; iColumn < boardSize; iColumn += sideSize){
if(!newBoard.isFixedValue[iColumn]){
total += newBoard.isPossibleValue[iColumn][guess-1];
}
}
if(total == 1){
for(int iColumn = i; iColumn < boardSize; iColumn += sideSize){
if(newBoard.isPossibleValue[iColumn][guess-1] && !newBoard.isFixedValue[iColumn]){
newBoard.values[iColumn] = guess;
newBoard.isFixedValue[iColumn] = true;
noChanges = false;
}
}
}
}
}
if(checkPossibles(newBoard))
return;
for(int r = 0; r < boardSize; r+= sideSize*3){
for(int c = 0; c < sideSize; c += 3){
int i = r+c;
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iBlockR = i; iBlockR < (i+sideSize*3); iBlockR += sideSize){
for(int iBlockC = iBlockR; iBlockC < (iBlockR + 3); iBlockC++){
if(!newBoard.isFixedValue[iBlockC])
total += newBoard.isPossibleValue[iBlockC][guess-1];
}
}
if(total == 1){
for(int iBlockR = i; iBlockR < (i+sideSize*3); iBlockR += sideSize){
for(int iBlockC = iBlockR; iBlockC < (iBlockR + 3); iBlockC++){
if(newBoard.isPossibleValue[iBlockC][guess-1] && !newBoard.isFixedValue[iBlockC]){
newBoard.values[iBlockC] = guess;
newBoard.isFixedValue[iBlockC] = true;
noChanges = false;
}
}
}
}
}
}
}
if(checkPossibles(newBoard))
return;
}while(noChanges == false);
if(!isSolved(newBoard))
newBoard = recursiveBrute(newBoard, 0);
}
int main(int argc, char *argv[]){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
char fileName[256];
Board mainBoard;
getFileName(fileName);
if(!getBoard(fileName,mainBoard))
cout <<"Error with filename";
else if(!validateBoard(mainBoard)){
displayBoard(mainBoard);
return 0;
}
solve(mainBoard);
displayBoard(mainBoard);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout <<"Solve time took " << milliseconds << "ms"<<endl;
return 0;
}
|
main_fft.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************
* test fft
* AUTHOR:haili
*
****************************************************/
#if 0
#include<cuda.h>
#include<stdlib.h>
#include<stdio.h>
#include"fft.h"
#include"gettime.c"
#include<time.h>
int main(int argc,char* argv[]){
int m,n,batch;
if(argc==4){
m=atoi(argv[1]);
n=atoi(argv[2]);
batch=atoi(argv[3]);
}else{
fprintf(stderr,"[%s:%d]input err!try again!\n",__FUNCTION__,__LINE__);
exit(-1);
}
//----------create data------------------------------
srand((unsigned)time(NULL));
hipComplex* data=(hipComplex*)malloc(sizeof(hipComplex)*batch*m*n);
for(int i=0;i<m*n*batch;i++){
data[i].x=(float)rand()/(RAND_MAX/100);
data[i].y=(float)rand()/(RAND_MAX/100);
}
//----------fft2-------------------------------------
// for(int i=0;i<m*n*batch;i++){
// printf("%f,%f\n",data[i].x,data[i].y);
// }
double time1,time2,time3,time4;
hipComplex* result=(hipComplex*)malloc(sizeof(hipComplex)*batch*n*m);
time1=gettime();
fft2_batch(m,n,data,result,batch);
time2=gettime();
// for(int i=0;i<m*n*batch;i++){
// printf("%f,%f\n",result[i].x,result[i].y);
// }
time4=gettime();
ifft2_batch(m,n,result,data,batch);
time3=gettime();
printf("%d %d %d %f %f\n",m,n,batch,time2-time1,time3-time4);
// for(int i=0;i<m*n*batch;i++){
// printf("%f,%f\n",data[i].x/6,data[i].y/6);
// }
return 0;
}
#endif
| main_fft.cu | /***************************************************
* test fft
* AUTHOR:haili
*
****************************************************/
#if 0
#include<cuda.h>
#include<stdlib.h>
#include<stdio.h>
#include"fft.h"
#include"gettime.c"
#include<time.h>
int main(int argc,char* argv[]){
int m,n,batch;
if(argc==4){
m=atoi(argv[1]);
n=atoi(argv[2]);
batch=atoi(argv[3]);
}else{
fprintf(stderr,"[%s:%d]input err!try again!\n",__FUNCTION__,__LINE__);
exit(-1);
}
//----------create data------------------------------
srand((unsigned)time(NULL));
cuComplex* data=(cuComplex*)malloc(sizeof(cuComplex)*batch*m*n);
for(int i=0;i<m*n*batch;i++){
data[i].x=(float)rand()/(RAND_MAX/100);
data[i].y=(float)rand()/(RAND_MAX/100);
}
//----------fft2-------------------------------------
// for(int i=0;i<m*n*batch;i++){
// printf("%f,%f\n",data[i].x,data[i].y);
// }
double time1,time2,time3,time4;
cuComplex* result=(cuComplex*)malloc(sizeof(cuComplex)*batch*n*m);
time1=gettime();
fft2_batch(m,n,data,result,batch);
time2=gettime();
// for(int i=0;i<m*n*batch;i++){
// printf("%f,%f\n",result[i].x,result[i].y);
// }
time4=gettime();
ifft2_batch(m,n,result,data,batch);
time3=gettime();
printf("%d %d %d %f %f\n",m,n,batch,time2-time1,time3-time4);
// for(int i=0;i<m*n*batch;i++){
// printf("%f,%f\n",data[i].x/6,data[i].y/6);
// }
return 0;
}
#endif
|
f66cfb65005e8dc46f701762135c1933ffb82000.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
printf("%i CELLS\n", gridCellCount);
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos2 failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 posSelf = pos[iSelf];
glm::vec3 cmass(0.f, 0.f, 0.f);
int cmassCount = 0;
glm::vec3 collisionV(0.f, 0.f, 0.f);
glm::vec3 cvel(0.f, 0.f, 0.f);
int cvelCount = 0;
for (int i = 0; i < N; ++i) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && glm::distance(pos[i], posSelf) < rule1Distance) {
cmass += pos[i];
cmassCount++;
}
// Rule 2: boids try to stay a distance d away from each other
if (i != iSelf && glm::distance(pos[i], posSelf) < rule2Distance) {
collisionV -= pos[i] - posSelf;
}
// Rule 3: boids try to match the speed of surrounding boids
if (i != iSelf && glm::distance(pos[i], posSelf) < rule3Distance) {
cvel += vel[i];
cvelCount++;
}
}
cmass = cmassCount > 0 ? (cmass / (float)cmassCount) - pos[iSelf] : cmass;
cvel = cvelCount > 0 ? cvel / (float)cvelCount : cvel;
return cmass * rule1Scale + collisionV * rule2Scale + cvel * rule3Scale;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Per-boid velocity update using the O(N) brute-force neighbor scan.
// Reads pos/vel1, writes vel2 (ping-pong so other threads' reads of vel1
// are not disturbed). One thread per boid; 1D launch.
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// Compute a new velocity based on pos and vel1
glm::vec3 vel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
// NOTE(review): glm::normalize of a zero vector yields NaN; this assumes
// boids never come fully to rest — TODO confirm.
vel = glm::normalize(vel) * glm::min(maxSpeed, glm::length(vel));
// Record the new velocity into vel2. Question: why NOT vel1?
// (don't want to mutate other velocities that are probably being used by other threads)
vel2[index] = vel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Integrate each boid's position by its velocity over dt, then wrap
// positions that leave the [-scene_scale, scene_scale] cube to the
// opposite face (toroidal domain). One thread per boid.
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate into a 1D index; x varies fastest, then y,
// then z, for a cubic grid of side gridResolution.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
// For each boid, record its flattened grid-cell index in gridIndices[i]
// and its own array index in indices[i]. The two arrays are then sorted
// together by cell so boids in the same cell become contiguous.
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// - Label each boid with the index of its grid cell.
//int xIdx = (pos[index].x - gridMin.x) * inverseCellWidth;
//int yIdx = (pos[index].y - gridMin.y) * inverseCellWidth;
//int zIdx = (pos[index].z - gridMin.z) * inverseCellWidth;
// Truncation on assignment to ivec3 floors the per-axis cell coordinates.
glm::ivec3 index3D = (pos[index] - gridMin) * inverseCellWidth;
gridIndices[index] = gridIndex3Dto1D(index3D.x, index3D.y, index3D.z, gridResolution);
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fill an int buffer of length N with `value` (e.g. -1 to mark grid cells
// that contain no boids). One thread per element.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
// From the sorted particleGridIndices array, record the [start, end)
// range of each occupied grid cell. Cells with no boids are left
// untouched, so callers should reset both buffers (e.g. to -1) first.
// BUG FIX: the original skipped index 0 entirely and never handled the
// last element, so the first occupied cell's start and the last occupied
// cell's end were never written.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int gIdx = particleGridIndices[index];
  if (index == 0) {
    // The first sorted boid always opens its cell.
    gridCellStartIndices[gIdx] = 0;
  } else {
    // "this index doesn't match the one before it, must be a new cell!"
    int gIdxPrev = particleGridIndices[index - 1];
    if (gIdxPrev != gIdx) {
      gridCellEndIndices[gIdxPrev] = index;
      gridCellStartIndices[gIdx] = index;
    }
  }
  if (index == N - 1) {
    // The last sorted boid always closes its cell (end is exclusive).
    gridCellEndIndices[gIdx] = N;
  }
}
// Compute the 3D grid-cell coordinate of `pos` (out: gIdx) and a 3-bit
// octant mask (out: mask): bit 0/1/2 set means the boid sits in the upper
// half of its cell along x/y/z, so only the +1 neighbor cells on that axis
// need searching (otherwise the -1 neighbors).
// BUG FIX: the original combined the bits with logical || — that collapses
// the whole expression to 0 or 1, so bits 1 and 2 could never be set and
// the y/z neighbor selection was wrong. Bitwise | builds the intended mask.
__device__ void getGridIndexAndQuadrantMask(glm::vec3 pos, glm::vec3 gridMin, float inverseCellWidth, glm::ivec3 &gIdx, unsigned char &mask) {
  glm::vec3 partialIndex3D = (pos - gridMin) * inverseCellWidth;
  gIdx = partialIndex3D;
  // Keep only the fractional (within-cell) part, in [0, 1) per axis.
  partialIndex3D = partialIndex3D - glm::floor(partialIndex3D);
  mask = ((partialIndex3D.x >= 0.5f) << 0) |
         ((partialIndex3D.y >= 0.5f) << 1) |
         ((partialIndex3D.z >= 0.5f) << 2);
}
// Collect the flattened indices of the (up to 8) grid cells a boid must
// search: its own cell plus the neighbors on the side of each axis chosen
// by the octant mask from getGridIndexAndQuadrantMask. Cells outside the
// grid bounds are dropped. Returns the number of cells written.
__device__ int getNeighborCells(int neighborCells[8], const glm::ivec3 &gIdx, const unsigned char &mask, int gridResolution) {
int nCells = 0;
for (int k = -1; k <= 1; ++k) {
for (int j = -1; j <= 1; ++j) {
for (int i = -1; i <= 1; ++i) {
// Skip an offset when the mask says the boid is in the half of the cell
// facing the opposite direction on that axis (bit set => upper half =>
// keep the +1 offset, drop the -1 offset; bit clear => the reverse).
if (!(
(i == -1 && (mask & 0x1)) || (i == 1 && !(mask & 0x1)) ||
(j == -1 && (mask & 0x2)) || (j == 1 && !(mask & 0x2)) ||
(k == -1 && (mask & 0x4)) || (k == 1 && !(mask & 0x4))
)) {
glm::ivec3 idx = gIdx + glm::ivec3(i, j, k);
// Clamp to the grid: discard neighbor coordinates outside [0, res).
if (!glm::any(glm::lessThan(idx, glm::ivec3(0, 0, 0))) &&
!glm::any(glm::greaterThanEqual(idx, glm::ivec3(gridResolution, gridResolution, gridResolution)))) {
neighborCells[nCells++] = gridIndex3Dto1D(idx.x, idx.y, idx.z, gridResolution);
}
}
}
}
}
return nCells;
}
// Velocity update using the uniform grid: only boids in the (up to 8)
// candidate neighbor cells are examined. "Scattered" = boid data stays in
// original order and is reached through particleArrayIndices indirection.
// Reads pos/vel1, writes vel2.
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
glm::ivec3 index3D;
unsigned char mask;
getGridIndexAndQuadrantMask(pos[index], gridMin, inverseCellWidth, index3D, mask);
// - Identify which cells may contain neighbors. This isn't always 8.
int neighborCells[8];
int nCells = getNeighborCells(neighborCells, index3D, mask, gridResolution);
glm::vec3 posSelf = pos[index];
glm::vec3 cmass(0.f, 0.f, 0.f);
int cmassCount = 0;
glm::vec3 collisionV(0.f, 0.f, 0.f);
glm::vec3 cvel(0.f, 0.f, 0.f);
int cvelCount = 0;
for (int c = 0; c < nCells; ++c) {
// - For each cell, read the start/end indices in the boid pointer array.
int n = neighborCells[c];
int start = gridCellStartIndices[n];
int end = gridCellEndIndices[n];
for (int b = start; b < end; ++b) {
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
int particleIndex = particleArrayIndices[b];
if (particleIndex == index) continue;
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (glm::distance(pos[particleIndex], posSelf) < rule1Distance) {
cmass += pos[particleIndex];
cmassCount++;
}
// Rule 2: boids try to stay a distance d away from each other
if (glm::distance(pos[particleIndex], posSelf) < rule2Distance) {
collisionV -= pos[particleIndex] - posSelf;
}
// Rule 3: boids try to match the speed of surrounding boids
if (glm::distance(pos[particleIndex], posSelf) < rule3Distance) {
cvel += vel1[particleIndex];
cvelCount++;
}
}
}
cmass = cmassCount > 0 ? (cmass / (float)cmassCount) - pos[index] : cmass;
cvel = cvelCount > 0 ? cvel / (float)cvelCount : cvel;
glm::vec3 vel = vel1[index] + cmass * rule1Scale + collisionV * rule2Scale + cvel * rule3Scale;
// - Clamp the speed change before putting the new speed in vel2
// NOTE(review): normalize of a zero vector is NaN — same assumption as the
// brute-force kernel that boids never fully stop; verify.
vel = glm::normalize(vel) * glm::min(maxSpeed, glm::length(vel));
vel2[index] = vel;
}
// Gather position/velocity data into cell-coherent order: element i of the
// target buffers is copied from the source slot named by tgtIndices[i]
// (the thrust-sorted particleArrayIndices). One thread per boid.
__global__ void kernShuffleParticleBuffers(int N,
  glm::vec3* srcPos, glm::vec3* srcVel,
  int* tgtIndices, glm::vec3* tgtPos, glm::vec3* tgtVel) {
  int i = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (i < N) {
    // Read the indirection once and reuse it for both copies.
    int src = tgtIndices[i];
    tgtPos[i] = srcPos[src];
    tgtVel[i] = srcVel[src];
  }
}
// Velocity update over the uniform grid with cell-coherent data: pos/vel1
// are already sorted by grid cell, so gridCellStartIndices/EndIndices
// index straight into them — one less indirection than the scattered
// version. Reads pos/vel1, writes vel2.
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Identify the grid cell this particle is in and its octant mask.
  glm::ivec3 index3D;
  unsigned char mask;
  getGridIndexAndQuadrantMask(pos[index], gridMin, inverseCellWidth, index3D, mask);
  // Identify which cells may contain neighbors. This isn't always 8.
  int neighborCells[8];
  int nCells = getNeighborCells(neighborCells, index3D, mask, gridResolution);
  glm::vec3 posSelf = pos[index];
  glm::vec3 cmass(0.f, 0.f, 0.f);      // rule 1 accumulator
  int cmassCount = 0;
  glm::vec3 collisionV(0.f, 0.f, 0.f); // rule 2 accumulator
  glm::vec3 cvel(0.f, 0.f, 0.f);       // rule 3 accumulator
  int cvelCount = 0;
  for (int c = 0; c < nCells; ++c) {
    // For each candidate cell, read its [start, end) boid range; the
    // range indexes pos/vel1 directly because the data is cell-sorted.
    int n = neighborCells[c];
    int start = gridCellStartIndices[n];
    int end = gridCellEndIndices[n];
    for (int particleIndex = start; particleIndex < end; ++particleIndex) {
      if (particleIndex == index) continue;
      // Rule 1: fly towards the local perceived center of mass.
      if (glm::distance(pos[particleIndex], posSelf) < rule1Distance) {
        cmass += pos[particleIndex];
        cmassCount++;
      }
      // Rule 2: keep a minimum separation from nearby boids.
      if (glm::distance(pos[particleIndex], posSelf) < rule2Distance) {
        collisionV -= pos[particleIndex] - posSelf;
      }
      // Rule 3: match the velocity of surrounding boids.
      if (glm::distance(pos[particleIndex], posSelf) < rule3Distance) {
        cvel += vel1[particleIndex];
        cvelCount++;
      }
    }
  }
  cmass = cmassCount > 0 ? (cmass / (float)cmassCount) - pos[index] : cmass;
  cvel = cvelCount > 0 ? cvel / (float)cvelCount : cvel;
  glm::vec3 vel = vel1[index] + cmass * rule1Scale + collisionV * rule2Scale + cvel * rule3Scale;
  // Clamp the speed before writing into vel2. When len > maxSpeed the
  // original recomputed the length and took min(maxSpeed, len), which is
  // always maxSpeed in that branch — scale the unit direction directly.
  float len = glm::length(vel);
  if (len > maxSpeed) {
    vel = glm::normalize(vel) * maxSpeed;
  }
  vel2[index] = vel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
// Advance the simulation by dt using the O(N^2) brute-force neighbor scan.
void Boids::stepSimulationNaive(float dt) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
// TODO-1.2 ping-pong the velocity buffers
// Swap so next frame reads this frame's output (dev_vel2) as dev_vel1.
std::swap(dev_vel1, dev_vel2);
}
// Advance the simulation by dt using the scattered uniform-grid search.
void Boids::stepSimulationScatteredGrid(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // Cell-count-sized launch for resetting the per-cell range buffers.
  dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
  // - Label each particle with its array index as well as its grid index.
  //   Use 2x width grids.
  hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
    dev_particleGridIndices);
  // - Unstable key sort using Thrust (a stable sort isn't necessary).
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  // BUG FIX: reset the cell range buffers to -1 every frame. Cells that
  // contain no boids are never written by kernIdentifyCellStartEnd, so
  // without the reset the neighbor search iterated over stale/garbage
  // ranges for empty cells. With both set to -1 the [start, end) loop
  // simply does no work.
  hipLaunchKernelGGL(( kernResetIntBuffer), dim3(cellBlocksPerGrid), dim3(blockSize) , 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
  hipLaunchKernelGGL(( kernResetIntBuffer), dim3(cellBlocksPerGrid), dim3(blockSize) , 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
  // - Find the start and end indices of each cell's data in the sorted array.
  hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  // - Perform velocity updates using neighbor search.
  hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos,
    dev_vel1, dev_vel2);
  // - Update positions.
  hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel2);
  // - Ping-pong the velocity buffers.
  std::swap(dev_vel1, dev_vel2);
}
// Advance the simulation by dt using the cell-coherent uniform-grid search:
// boid data is reshuffled into cell order so the search kernel reads
// contiguous memory.
void Boids::stepSimulationCoherentGrid(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // Cell-count-sized launch for resetting the per-cell range buffers.
  dim3 cellBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
  // - Label each particle with its array index as well as its grid index.
  kernComputeIndices << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
    dev_particleGridIndices);
  // - Unstable key sort using Thrust (a stable sort isn't necessary).
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  // BUG FIX: reset the cell range buffers to -1 every frame so empty cells
  // (never written by kernIdentifyCellStartEnd) yield an empty [start, end)
  // range instead of stale/garbage indices.
  kernResetIntBuffer << < cellBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  kernResetIntBuffer << < cellBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  // - Find the start and end indices of each cell's data in the sorted array.
  kernIdentifyCellStartEnd << < fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  // - BIG DIFFERENCE: gather the particle data into cell-coherent order
  //   (dev_pos2 / dev_vel2) using the sorted index buffer.
  kernShuffleParticleBuffers << < fullBlocksPerGrid, blockSize >> >(
    numObjects, dev_pos, dev_vel1,
    dev_particleArrayIndices, dev_pos2, dev_vel2);
  // - Velocity update reads the coherent buffers and writes into dev_vel1,
  //   so no velocity ping-pong is needed afterwards.
  kernUpdateVelNeighborSearchCoherent << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_pos2, dev_vel2, dev_vel1);
  // - Update positions in the coherent buffer, then ping-pong the position
  //   buffers so dev_pos holds the new frame.
  kernUpdatePos << < fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos2, dev_vel1);
  std::swap(dev_pos, dev_pos2);
}
// Release every device buffer allocated in initSimulation.
void Boids::endSimulation() {
  hipFree(dev_vel1);
  hipFree(dev_vel2);
  hipFree(dev_pos);
  // BUG FIX: the original leaked the four uniform-grid buffers; free them
  // along with the coherent-grid position buffer.
  hipFree(dev_particleArrayIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_gridCellEndIndices);
  hipFree(dev_pos2);
}
// Smoke test for thrust::sort_by_key on a small key/value set: prints the
// pairs before and after the unstable sort so reordering can be inspected.
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // test unstable sort
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  // Duplicate keys (0 and 2) exercise the "unstable" part of the sort.
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  hipMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
  hipMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup
  // BUG FIX: arrays allocated with new[] must be released with delete[];
  // scalar delete on a new[] pointer is undefined behavior.
  delete[] intKeys;
  delete[] intValues;
  hipFree(dev_intKeys);
  hipFree(dev_intValues);
  checkCUDAErrorWithLine("hipFree failed!");
  return;
}
| f66cfb65005e8dc46f701762135c1933ffb82000.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
// Print `msg` (and the source line, when >= 0) with the CUDA error string
// and exit if the last CUDA call left a sticky error.
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// 32-bit integer mixing function used to seed the per-thread RNG
// (appears to be a Jenkins-style integer hash — constants match the
// well-known 6-shift mix; verify against the original reference).
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3 with each component in [-1, 1],
* seeded deterministically from (index * time).
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
// Fill arr with N random positions uniformly scattered in the cube
// [-scale, scale]^3. One thread per element; `time` seeds the RNG.
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
// Allocate all device buffers, seed random boid positions, and compute the
// uniform-grid parameters (cell width = 2x the largest rule radius, grid
// centered on the origin covering the scene cube).
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  // LOOK-1.2 - Basic CUDA memory management and error checking.
  // Don't forget to cudaFree in Boids::endSimulation.
  cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
  cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
  cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
  kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // LOOK-2.1 computing grid params
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  printf("%i CELLS\n", gridCellCount);
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  // gridMinimum is zero-initialized (file-scope), so this centers the grid.
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // Uniform-grid buffers (2.1) and the coherent-grid position buffer (2.3).
  cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
  cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
  cudaMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos2 failed!");
  // cudaThreadSynchronize is deprecated (removed in recent toolkits);
  // cudaDeviceSynchronize is the supported equivalent.
  cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
// Write each boid's position into the vec4-per-boid VBO, scaled into
// normalized view space (c_scale flips and divides by the scene size);
// w is fixed at 1. One thread per boid.
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
// Write each boid's velocity into the vec4-per-boid VBO, offset by 0.3
// per component (used as an RGB color bias by the renderer, presumably —
// verify against the shader); w is fixed at 1.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
// Copy boid positions and velocities into the mapped OpenGL VBOs so they
// can be drawn; blocks until both copy kernels finish.
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  // cudaThreadSynchronize is deprecated (removed in recent toolkits);
  // cudaDeviceSynchronize is the supported equivalent.
  cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
// Brute-force O(N) scan over all boids, accumulating the three flocking
// rules for boid `iSelf`. Returns the velocity delta; the caller adds it
// to vel[iSelf]. A boid never influences itself.
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 posSelf = pos[iSelf];
glm::vec3 cmass(0.f, 0.f, 0.f);
int cmassCount = 0;
glm::vec3 collisionV(0.f, 0.f, 0.f);
glm::vec3 cvel(0.f, 0.f, 0.f);
int cvelCount = 0;
for (int i = 0; i < N; ++i) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (i != iSelf && glm::distance(pos[i], posSelf) < rule1Distance) {
cmass += pos[i];
cmassCount++;
}
// Rule 2: boids try to stay a distance d away from each other
if (i != iSelf && glm::distance(pos[i], posSelf) < rule2Distance) {
collisionV -= pos[i] - posSelf;
}
// Rule 3: boids try to match the speed of surrounding boids
if (i != iSelf && glm::distance(pos[i], posSelf) < rule3Distance) {
cvel += vel[i];
cvelCount++;
}
}
// Convert sums to rule contributions: offset toward the neighborhood
// centroid (rule 1) and toward the average neighbor velocity (rule 3).
cmass = cmassCount > 0 ? (cmass / (float)cmassCount) - pos[iSelf] : cmass;
cvel = cvelCount > 0 ? cvel / (float)cvelCount : cvel;
return cmass * rule1Scale + collisionV * rule2Scale + cvel * rule3Scale;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Per-boid velocity update using the O(N) brute-force neighbor scan.
// Reads pos/vel1, writes vel2 (ping-pong). One thread per boid.
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// Compute a new velocity based on pos and vel1
glm::vec3 vel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
// NOTE(review): glm::normalize of a zero vector yields NaN; assumes boids
// never come fully to rest — TODO confirm.
vel = glm::normalize(vel) * glm::min(maxSpeed, glm::length(vel));
// Record the new velocity into vel2. Question: why NOT vel1?
// (don't want to mutate other velocities that are probably being used by other threads)
vel2[index] = vel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Integrate each boid's position by its velocity over dt, wrapping
// positions that leave the [-scene_scale, scene_scale] cube to the
// opposite face (toroidal domain). One thread per boid.
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate into a 1D index; x varies fastest, then y,
// then z, for a cubic grid of side gridResolution.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
// For each boid, record its flattened grid-cell index in gridIndices[i]
// and its own array index in indices[i]; the pair is later sorted by cell.
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// - Label each boid with the index of its grid cell.
//int xIdx = (pos[index].x - gridMin.x) * inverseCellWidth;
//int yIdx = (pos[index].y - gridMin.y) * inverseCellWidth;
//int zIdx = (pos[index].z - gridMin.z) * inverseCellWidth;
// Truncation on assignment to ivec3 floors the per-axis cell coordinates.
glm::ivec3 index3D = (pos[index] - gridMin) * inverseCellWidth;
gridIndices[index] = gridIndex3Dto1D(index3D.x, index3D.y, index3D.z, gridResolution);
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fill an int buffer of length N with `value` (e.g. -1 to mark grid cells
// that contain no boids). One thread per element.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
// From the sorted particleGridIndices array, record the [start, end)
// range of each occupied grid cell. Cells with no boids are left
// untouched, so callers should reset both buffers (e.g. to -1) first.
// BUG FIX: the original skipped index 0 entirely and never handled the
// last element, so the first occupied cell's start and the last occupied
// cell's end were never written.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int gIdx = particleGridIndices[index];
  if (index == 0) {
    // The first sorted boid always opens its cell.
    gridCellStartIndices[gIdx] = 0;
  } else {
    // "this index doesn't match the one before it, must be a new cell!"
    int gIdxPrev = particleGridIndices[index - 1];
    if (gIdxPrev != gIdx) {
      gridCellEndIndices[gIdxPrev] = index;
      gridCellStartIndices[gIdx] = index;
    }
  }
  if (index == N - 1) {
    // The last sorted boid always closes its cell (end is exclusive).
    gridCellEndIndices[gIdx] = N;
  }
}
// Compute the 3D grid-cell coordinate of `pos` (out: gIdx) and a 3-bit
// octant mask (out: mask): bit 0/1/2 set means the boid sits in the upper
// half of its cell along x/y/z, selecting which neighbor cells to search.
// BUG FIX: the original combined the bits with logical || — that collapses
// the whole expression to 0 or 1, so bits 1 and 2 could never be set and
// the y/z neighbor selection was wrong. Bitwise | builds the intended mask.
__device__ void getGridIndexAndQuadrantMask(glm::vec3 pos, glm::vec3 gridMin, float inverseCellWidth, glm::ivec3 &gIdx, unsigned char &mask) {
  glm::vec3 partialIndex3D = (pos - gridMin) * inverseCellWidth;
  gIdx = partialIndex3D;
  // Keep only the fractional (within-cell) part, in [0, 1) per axis.
  partialIndex3D = partialIndex3D - glm::floor(partialIndex3D);
  mask = ((partialIndex3D.x >= 0.5f) << 0) |
         ((partialIndex3D.y >= 0.5f) << 1) |
         ((partialIndex3D.z >= 0.5f) << 2);
}
// Collect the flattened indices of the (up to 8) grid cells a boid must
// search: its own cell plus the neighbors on the side of each axis chosen
// by the octant mask. Cells outside the grid bounds are dropped.
// Returns the number of cells written to neighborCells.
__device__ int getNeighborCells(int neighborCells[8], const glm::ivec3 &gIdx, const unsigned char &mask, int gridResolution) {
int nCells = 0;
for (int k = -1; k <= 1; ++k) {
for (int j = -1; j <= 1; ++j) {
for (int i = -1; i <= 1; ++i) {
// Keep an offset only when it matches the mask's half-cell choice on
// every axis (bit set => upper half => +1 side; clear => -1 side).
if (!(
(i == -1 && (mask & 0x1)) || (i == 1 && !(mask & 0x1)) ||
(j == -1 && (mask & 0x2)) || (j == 1 && !(mask & 0x2)) ||
(k == -1 && (mask & 0x4)) || (k == 1 && !(mask & 0x4))
)) {
glm::ivec3 idx = gIdx + glm::ivec3(i, j, k);
// Discard neighbor coordinates outside [0, gridResolution).
if (!glm::any(glm::lessThan(idx, glm::ivec3(0, 0, 0))) &&
!glm::any(glm::greaterThanEqual(idx, glm::ivec3(gridResolution, gridResolution, gridResolution)))) {
neighborCells[nCells++] = gridIndex3Dto1D(idx.x, idx.y, idx.z, gridResolution);
}
}
}
}
}
return nCells;
}
// Velocity update using the uniform grid: only boids in the (up to 8)
// candidate neighbor cells are examined. "Scattered" = boid data stays in
// original order and is reached through particleArrayIndices indirection.
// Reads pos/vel1, writes vel2.
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// - Identify the grid cell that this particle is in
glm::ivec3 index3D;
unsigned char mask;
getGridIndexAndQuadrantMask(pos[index], gridMin, inverseCellWidth, index3D, mask);
// - Identify which cells may contain neighbors. This isn't always 8.
int neighborCells[8];
int nCells = getNeighborCells(neighborCells, index3D, mask, gridResolution);
glm::vec3 posSelf = pos[index];
glm::vec3 cmass(0.f, 0.f, 0.f);
int cmassCount = 0;
glm::vec3 collisionV(0.f, 0.f, 0.f);
glm::vec3 cvel(0.f, 0.f, 0.f);
int cvelCount = 0;
for (int c = 0; c < nCells; ++c) {
// - For each cell, read the start/end indices in the boid pointer array.
int n = neighborCells[c];
int start = gridCellStartIndices[n];
int end = gridCellEndIndices[n];
for (int b = start; b < end; ++b) {
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
int particleIndex = particleArrayIndices[b];
if (particleIndex == index) continue;
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (glm::distance(pos[particleIndex], posSelf) < rule1Distance) {
cmass += pos[particleIndex];
cmassCount++;
}
// Rule 2: boids try to stay a distance d away from each other
if (glm::distance(pos[particleIndex], posSelf) < rule2Distance) {
collisionV -= pos[particleIndex] - posSelf;
}
// Rule 3: boids try to match the speed of surrounding boids
if (glm::distance(pos[particleIndex], posSelf) < rule3Distance) {
cvel += vel1[particleIndex];
cvelCount++;
}
}
}
cmass = cmassCount > 0 ? (cmass / (float)cmassCount) - pos[index] : cmass;
cvel = cvelCount > 0 ? cvel / (float)cvelCount : cvel;
glm::vec3 vel = vel1[index] + cmass * rule1Scale + collisionV * rule2Scale + cvel * rule3Scale;
// - Clamp the speed change before putting the new speed in vel2
// NOTE(review): normalize of a zero vector is NaN — assumes boids never
// fully stop; verify.
vel = glm::normalize(vel) * glm::min(maxSpeed, glm::length(vel));
vel2[index] = vel;
}
// Gather position/velocity data into cell-coherent order: tgtIndices[i]
// names the source slot whose data belongs at coherent slot i, so after this
// kernel tgtPos/tgtVel can be indexed directly by sorted-cell position.
__global__ void kernShuffleParticleBuffers(int N,
  glm::vec3* srcPos, glm::vec3* srcVel,
  int* tgtIndices, glm::vec3* tgtPos, glm::vec3* tgtVel) {
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (tid < N) {
    const int src = tgtIndices[tid];
    tgtPos[tid] = srcPos[src];
    tgtVel[tid] = srcVel[src];
  }
}
// Compute a boid's new velocity from the three flocking rules using the
// uniform grid, with pos/vel1 already reordered to be cell-coherent:
// gridCellStartIndices/gridCellEndIndices index pos and vel1 directly
// (one less indirection than the scattered variant). Writes the clamped
// result to vel2[index].
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Identify this particle's grid cell and its octant within the cell (mask),
  // so only the up-to-8 cells that can contain neighbors are scanned.
  glm::ivec3 index3D;
  unsigned char mask;
  getGridIndexAndQuadrantMask(pos[index], gridMin, inverseCellWidth, index3D, mask);
  int neighborCells[8];
  int nCells = getNeighborCells(neighborCells, index3D, mask, gridResolution);
  glm::vec3 posSelf = pos[index];
  glm::vec3 cmass(0.f, 0.f, 0.f);      // rule 1: perceived center of mass
  int cmassCount = 0;
  glm::vec3 collisionV(0.f, 0.f, 0.f); // rule 2: separation
  glm::vec3 cvel(0.f, 0.f, 0.f);       // rule 3: perceived average velocity
  int cvelCount = 0;
  for (int c = 0; c < nCells; ++c) {
    int n = neighborCells[c];
    int start = gridCellStartIndices[n];
    int end = gridCellEndIndices[n];
    // Boids within a cell are contiguous, so this loop reads pos/vel1
    // sequentially (the memory benefit of the coherent reshuffle).
    for (int particleIndex = start; particleIndex < end; ++particleIndex) {
      if (particleIndex == index) continue;
      // Load the neighbor's position and compute the distance once, instead
      // of re-reading global memory and recomputing it for each rule.
      glm::vec3 posOther = pos[particleIndex];
      float dist = glm::distance(posOther, posSelf);
      // Rule 1: boids fly towards their local perceived center of mass,
      // which excludes themselves
      if (dist < rule1Distance) {
        cmass += posOther;
        cmassCount++;
      }
      // Rule 2: boids try to stay a distance d away from each other
      if (dist < rule2Distance) {
        collisionV -= posOther - posSelf;
      }
      // Rule 3: boids try to match the speed of surrounding boids
      if (dist < rule3Distance) {
        cvel += vel1[particleIndex];
        cvelCount++;
      }
    }
  }
  cmass = cmassCount > 0 ? (cmass / (float)cmassCount) - posSelf : cmass;
  cvel = cvelCount > 0 ? cvel / (float)cvelCount : cvel;
  glm::vec3 vel = vel1[index] + cmass * rule1Scale + collisionV * rule2Scale + cvel * rule3Scale;
  // Clamp the speed before storing it in vel2. Inside the branch we already
  // know len > maxSpeed, so scale directly by maxSpeed/len instead of the
  // previous normalize(vel) * min(maxSpeed, length(vel)), which recomputed
  // the length twice for the same result.
  float len = glm::length(vel);
  if (len > maxSpeed) {
    vel *= maxSpeed / len;
  }
  vel2[index] = vel;
}
/**
* Step the entire N-body simulation by `dt` seconds using the brute-force
* O(N^2) neighbor search. Velocities are computed into dev_vel2, positions
* integrated, then the velocity buffers are ping-ponged.
*/
void Boids::stepSimulationNaive(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong the velocity buffers: the freshly computed velocities in
  // dev_vel2 become the input (dev_vel1) of the next step.
  std::swap(dev_vel1, dev_vel2);
}
// Step the simulation by `dt` seconds using the scattered uniform grid:
// boids are binned by grid cell and sorted by cell index, and the velocity
// kernel reaches the unsorted pos/vel buffers through the sorted index array.
void Boids::stepSimulationScatteredGrid(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // Label each particle with its array index as well as its grid cell index.
  kernComputeIndices<<< fullBlocksPerGrid, blockSize >>>(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
    dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust; stability is not required here.
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  // Find each cell's [start, end) span in the sorted boid index array.
  // NOTE(review): cells that contain no boids keep whatever values are
  // already in dev_gridCellStartIndices/EndIndices — confirm these buffers
  // are reset (or never read for empty cells) before this call.
  kernIdentifyCellStartEnd<<< fullBlocksPerGrid, blockSize >>>(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // Velocity update via grid-limited neighbor search.
  kernUpdateVelNeighborSearchScattered<<< fullBlocksPerGrid, blockSize >>>(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos,
    dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
  // Integrate positions with the new velocities.
  kernUpdatePos<<< fullBlocksPerGrid, blockSize >>>(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Ping-pong the velocity buffers for the next step.
  std::swap(dev_vel1, dev_vel2);
}
// Step the simulation by `dt` seconds using the coherent uniform grid:
// like the scattered grid, but pos/vel data is reshuffled into cell order so
// the neighbor-search kernel reads contiguous memory with no indirection.
void Boids::stepSimulationCoherentGrid(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // Label each particle with its array index as well as its grid cell index.
  kernComputeIndices << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
    dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Unstable key sort using Thrust; stability is not required here.
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  // Find each cell's [start, end) span in the sorted boid index array.
  kernIdentifyCellStartEnd << < fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // Reshuffle pos/vel1 into cell order; dev_pos2/dev_vel2 hold the coherent
  // copies that the start/end indices can address directly.
  kernShuffleParticleBuffers << < fullBlocksPerGrid, blockSize >> >(
    numObjects, dev_pos, dev_vel1,
    dev_particleArrayIndices, dev_pos2, dev_vel2);
  checkCUDAErrorWithLine("kernShuffleParticleBuffers failed!");
  // New velocities are written into dev_vel1 (in coherent order), reading
  // the coherent dev_pos2/dev_vel2.
  kernUpdateVelNeighborSearchCoherent << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_pos2, dev_vel2, dev_vel1);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  // Integrate the coherent positions with the new velocities.
  kernUpdatePos << < fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos2, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // dev_pos2 now holds the updated positions (coherent order); make it the
  // primary position buffer. The new velocities already live in dev_vel1,
  // so no velocity swap is needed.
  std::swap(dev_pos, dev_pos2);
}
// Release all device buffers owned by the simulation. The original version
// leaked the uniform-grid index buffers; they are freed here as well
// (cudaFree on a null pointer is a harmless no-op if a path never
// allocated them).
void Boids::endSimulation() {
  cudaFree(dev_vel1);
  cudaFree(dev_vel2);
  cudaFree(dev_pos);
  // Additional buffers used by the scattered/coherent grid paths.
  cudaFree(dev_pos2);
  cudaFree(dev_particleArrayIndices);
  cudaFree(dev_particleGridIndices);
  cudaFree(dev_gridCellStartIndices);
  cudaFree(dev_gridCellEndIndices);
}
// Self-test for the Thrust-based unstable key sort used by the grid paths:
// uploads a small key/value array, sorts by key on the device, and prints
// the array before and after for visual inspection.
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // test unstable sort
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup — the arrays came from new[], so they must be released with
  // delete[] (plain delete on a new[] array is undefined behavior).
  delete[] intKeys;
  delete[] intValues;
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
  return;
}
|
9471faed0a0f2f5b37f7354cbe9daf96d6535f7e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Diffusion.cpp
*
* Created on: May 18, 2016
* Author: NungnunG
*/
#include "../common.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
// Include CUDA runtime and CUFFT
#include <hip/hip_runtime.h>
#include <hipfft.h>
// Helper functions for CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "../Utilities/output_utils.h"
#include "convolutionFFT_common.h"
//#include "convolutionFFT2D.cu"
using namespace std;
#ifdef GPU_DIFFUSE // (*)
bool firstTransferCompleted = false;
////////////////////////////////////////////////////////////////////////////////
//// Helper functions
//////////////////////////////////////////////////////////////////////////////////
// Choose an FFT transform size for `dataSize`:
//  - align up to a multiple of 16; if that is already a power of two,
//    return it unchanged;
//  - otherwise return the next power of two above it.
// The original large-size branch computed pow(2, ceil(log(dataSize)/log(2)))
// in double precision — exactly the next power of two, which `hiPOT` below
// already holds as an integer. Both branches of the old `hiPOT <= 1024` test
// therefore returned hiPOT; the floating-point round trip is removed.
int snapTransformSize(int dataSize)
{
    int hiBit;
    unsigned int lowPOT, hiPOT;
    dataSize = iAlignUp(dataSize, 16);
    // Locate the highest set bit of the aligned size.
    for (hiBit = 31; hiBit >= 0; hiBit--)
        if (dataSize & (1U << hiBit))
        {
            break;
        }
    lowPOT = 1U << hiBit;
    if (lowPOT == (unsigned int)dataSize)
    {
        // Already a power of two.
        return dataSize;
    }
    // Next power of two above dataSize.
    hiPOT = 1U << (hiBit + 1);
    return (int)hiPOT;
}
// Host-to-device transfer for chemical `ic`: uploads np floats of input
// concentration data (chem_cctx->h_ibuffs[ic]) into chem_cctx->d_data[ic].
// In 3D builds (except M40) it also re-uploads this chemical's precomputed
// kernel spectrum.
void H2D(int ic, c_ctx* chem_cctx, int np)
{
#ifdef MODEL_3D
// Bind the device that owns this chemical before issuing any copies.
int ig = chem_cctx->gpu_id[ic];
hipSetDevice(chem_cctx->dev_id[ic]);//ig);
#ifndef M40
const int fftD = chem_cctx->FFTD;
const int fftH = chem_cctx->FFTH;
const int fftW = chem_cctx->FFTW;
// R2C spectrum layout: fftD * fftH * (fftW/2 + 1) complex values.
size_t fssize_b = fftD * fftH * (fftW / 2 + 1) * sizeof(fComplex);
// size_t fsize_b = fftD * fftH * fftW * sizeof(float);
// Copy Kernel Spectrum from Host to Device
checkCudaErrors(hipMemcpy(chem_cctx->d_kernelspectrum_h[ic], chem_cctx->h_kernelspectrum[ic],
fssize_b, hipMemcpyHostToDevice));
#endif // M40
#endif // MODEL_3D
float **h_ibuffs = chem_cctx->h_ibuffs;
#ifdef ASYNCCPY
// NOTE(review): `ig` (and presumably `stream`) are only declared in the
// MODEL_3D path above, so the ASYNCCPY branch appears to require MODEL_3D
// to compile — confirm the supported build combinations.
printf("async P2D copy: gpu[%d] chem [%d]\n", ig, ic);
checkCudaErrors(hipMemcpyAsync(chem_cctx->d_data[ic], h_ibuffs[ic], np*sizeof(float),
hipMemcpyHostToDevice, stream[ig]));
#else // ASYNCCPY
checkCudaErrors(hipMemcpy(chem_cctx->d_data[ic], h_ibuffs[ic],
np*sizeof(float), hipMemcpyHostToDevice));
#endif // ASYNCCPY
}
// Device-to-host transfer for chemical `ic`: downloads np floats of result
// data from chem_cctx->d_data[ic] into chem_cctx->h_obuffs[ic].
void D2H(int ic, c_ctx* chem_cctx, int np)
{
#ifdef MODEL_3D
int ig = chem_cctx->gpu_id[ic];
hipSetDevice(chem_cctx->dev_id[ic]);//ig);
#endif // MODEL_3D
float **h_obuffs = chem_cctx->h_obuffs;
#ifdef ASYNCCPY
// Fixed: the log statement referenced an undeclared variable `i`; use the
// GPU id `ig`, matching the equivalent message in H2D().
printf("async D2P copy: gpu[%d] chem [%d]\n", ig, ic);
checkCudaErrors(hipMemcpyAsync(h_obuffs[ic], chem_cctx->d_data[ic], np*sizeof(float),
hipMemcpyDeviceToHost, stream[ig]));
#else // ASYNCCPY
checkCudaErrors(hipMemcpy(h_obuffs[ic], chem_cctx->d_data[ic],
np*sizeof(float), hipMemcpyDeviceToHost));
#endif // ASYNCCPY
}
// Compare a CPU reference result against a GPU result over a
// dataW x dataH x dataD volume using a relative L2 norm, and report the
// worst per-element relative error. Returns true when the L2 norm is below
// eThreshold.
bool compareResults(float *h_ResultCPU, float *h_ResultGPU,
int dataW, int dataH, int dataD, float eThreshold)
{
double sum_delta2 = 0;
double sum_ref2 = 0;
double max_delta_ref = 0;
double sum = 0;
for (int z = 0; z < dataD; z++)
for (int y = 0; y < dataH; y++)
for (int x = 0; x < dataW; x++)
{
double rCPU = (double)h_ResultCPU[z * dataH * dataW + y * dataW + x];
double rGPU = (double)h_ResultGPU[z * dataH * dataW + y * dataW + x];
double delta = (rCPU - rGPU) * (rCPU - rGPU);
// Reference magnitude uses both operands. The original summed
// rCPU * rCPU twice, which skews the norm whenever CPU and GPU differ
// (the NVIDIA convolutionFFT2D sample this derives from uses
// rCPU^2 + rGPU^2).
double ref = rCPU * rCPU + rGPU * rGPU;
// Track the worst per-element relative error; skip elements where both
// values are zero to avoid a 0/0 NaN (the commented-out guard in the
// original hinted at this problem).
if (ref > 0.0 && (delta / ref) > max_delta_ref)
{
max_delta_ref = delta / ref;
}
sum_delta2 += delta;
sum_ref2 += ref;
sum += rGPU;
}
double L2norm = sqrt(sum_delta2 / sum_ref2);
printf("rel L2 = %E (max delta = %E)\n", L2norm, sqrt(max_delta_ref));
bool bRetVal = (L2norm < eThreshold) ? true : false;
printf(bRetVal ? "L2norm Error OK\n" : "L2norm Error too high!\n");
return bRetVal;
}
#ifndef MODEL_3D
/*
bool computeChemDiffusionCPU(
float *h_ChemOut,
float *h_ChemIn,
float *h_Kernel,
c_ctx cctx,
int iter)
{
bool bRetVal = 1;
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
printf("Testing Chemical Diffusion CPU\n");
const int kernelH = cctx.KH;//7;
const int kernelW = cctx.KW;//6;
const int kernelY = cctx.KY;//3;
const int kernelX = cctx.KX;//4;
const int dataH = cctx.DH;//100;//1160;//2000;
const int dataW = cctx.DW;//100;//1660;//2000;
const int outKernelH = cctx.DH;
const int outKernelW = cctx.DW;
const int fftH = cctx.FFTH;
const int fftW = cctx.FFTW;
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
fprintf(stderr,"...running reference CPU convolution\n");
convolutionClampToBorderCPU(
h_ChemOut,
h_ChemIn,
h_Kernel,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX
);
sdkStopTimer(&hTimer);
double cpuTime = sdkGetTimerValue(&hTimer);
printf("\t\tCPU chemical diffusion computation:\t%f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (cpuTime * 0.001), cpuTime);
sdkDeleteTimer(&hTimer);
return bRetVal;
}
*/
// Pad the dataH x dataW input d_Data into the FFT-sized buffer d_PaddedData
// (fftH x fftW), dispatching to the padding kernel selected by `pt`.
// epiBoundary and baseChem are only consumed by the pConstantVF policy;
// callers of the other policies pass placeholder values (e.g. -1).
void padData2D(
pad_t pt,
float *d_PaddedData,
float *d_Data,
int fftH,
int fftW,
int dataH,
int dataW,
int kernelH,
int kernelW,
int kernelY,
int kernelX,
int epiBoundary,
float baseChem)
{
switch(pt)
{
// Dispatch to the clamp-to-border padding kernel.
case pClampToBorder:
padDataClampToBorder(
d_PaddedData,
d_Data,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX);
break;
// Dispatch to the right-wall padding kernel.
case pRightWall:
padDataRightWall(
d_PaddedData,
d_Data,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX);
break;
// Dispatch to the mirror padding kernel.
case pMirror:
padDataMirror(
d_PaddedData,
d_Data,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX);
break;
// Dispatch to the constant-value padding kernel; this is the only policy
// that uses epiBoundary/baseChem.
case pConstantVF:
padDataConstantVF(
d_PaddedData,
d_Data,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX,
epiBoundary,
baseChem);
break;
}
}
// Build an effective diffusion kernel window by repeatedly convolving a
// 5-point stencil with itself on the GPU (kernelRadius FFT convolution
// iterations), then extract the windowH x windowW center of the result into
// the device buffer d_Window.
//   d_Window:     device output buffer for the extracted kernel window
//   kernelRadius: number of self-convolution iterations to run
//   lambda:       diffusion coefficient of the stencil's 4 neighbors
//   gamma:        decay constant (applied to the stencil center as gamma*dt)
//   dt:           time step per iteration
//   cctx:         geometry context (kernel/data/FFT/window dimensions)
// Returns bRetVal (always true; the CPU cross-check is commented out).
bool computeKernel(
float *d_Window,
int kernelRadius,
float lambda,
float gamma, // decay constant
float dt,
c_ctx cctx)
{
/********************************************
* Declarations and allocations *
********************************************/
float t = 0.0;
// Ping-pong indices for the (currently disabled) CPU reference buffers.
int
cpu_input = 0,
cpu_output = 1;
float
*h_Data,
*h_Kernel,
*h_Window,
*h_ResultGPU,
*h_ResultCPU[2];
float
*d_Data,
*d_PaddedData,
*d_Kernel,
*d_PaddedKernel;
fComplex
*d_DataSpectrum,
*d_KernelSpectrum;
hipfftHandle
fftPlanFwd,
fftPlanInv;
bool bRetVal = true;
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
#ifdef PRINT_KERNEL
fprintf(stderr, "Testing kernel computation\n");
printf("Testing kernel computation\n");
fprintf(stderr, "\tBuilding filter kernel\n");
#endif
const int kernelH = cctx.KH;//7;
const int kernelW = cctx.KW;//6;
const int kernelY = cctx.KY;//3;
const int kernelX = cctx.KX;//4;
const int dataH = cctx.DH;//100;//1160;//2000;
const int dataW = cctx.DW;//100;//1660;//2000;
const int outKernelH = cctx.DH;
const int outKernelW = cctx.DW;
const int fftH = cctx.FFTH;
const int fftW = cctx.FFTW;
// Changed 2
const int windowH = cctx.windowH;
const int windowW = cctx.windowW;
#ifdef PRINT_KERNEL
fprintf(stderr, "...allocating memory\n");
#endif
h_Data = (float *)malloc(dataH * dataW * sizeof(float));
h_Kernel = (float *)malloc(kernelH * kernelW * sizeof(float));
h_Window = (float *)malloc(windowH * windowW * sizeof(float));
h_ResultGPU = (float *)malloc(dataH * dataW * sizeof(float));
h_ResultCPU[cpu_input] = (float *)malloc(dataH * dataW * sizeof(float));
h_ResultCPU[cpu_output] = (float *)malloc(dataH * dataW * sizeof(float));
checkCudaErrors(hipMalloc((void **)&d_Data, dataH * dataW * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Kernel, kernelH * kernelW * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_PaddedData, fftH * fftW * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_PaddedKernel, fftH * fftW * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_DataSpectrum, fftH * (fftW / 2 + 1) * sizeof(fComplex)));
checkCudaErrors(hipMalloc((void **)&d_KernelSpectrum, fftH * (fftW / 2 + 1) * sizeof(fComplex)));
#ifdef PRINT_KERNEL
fprintf(stderr, "...generating 2D %d x %d kernel coefficients\n", kernelH, kernelW);
#endif
/********************************************
* Initial kernel initialization *
********************************************/
for (int i = 0; i < kernelH * kernelW; i++)
{
h_Kernel[i] = 0;
}
// 5-point diffusion stencil: the four orthogonal neighbors get lambda,
// the center keeps the remainder minus the decay term gamma*dt.
h_Kernel[0 * kernelW + 1] = lambda;
h_Kernel[1 * kernelW + 0] = lambda;
h_Kernel[1 * kernelW + 2] = lambda;
h_Kernel[2 * kernelW + 1] = lambda;
h_Kernel[1 * kernelW + 1] = 1 - 4*lambda - gamma*dt;
for (int i = 0; i < dataH * dataW; i++)
{
h_Data[i] = 0;
h_ResultCPU[cpu_input][i] = 0;
}
// Copy kernel data to middle block of the input
int start_i = outKernelH/2 - kernelH/2;
int end_i = outKernelH/2 + kernelH/2 + 1;
int start_j = outKernelW/2 - kernelW/2;
int end_j = outKernelW/2 + kernelW/2 + 1;
int ki = 0, kj = 0;
for (int i = start_i; i < end_i; i++) {
for (int j = start_j; j < end_j; j++) {
h_Data [i * dataW + j] = h_Kernel[ki * kernelW + kj];
h_ResultCPU [cpu_input] [i * dataW + j] = h_Kernel[ki * kernelW + kj];
#ifdef PRINT_KERNEL
printf("%d,%d -> %d,%d\n", ki, kj, i, j);
#endif
kj++;
}
ki++;
kj = 0;
}
#ifdef PRINT_KERNEL
fprintf(stderr, "...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
#endif
checkCudaErrors(hipfftPlan2d(&fftPlanFwd, fftH, fftW, HIPFFT_R2C));
checkCudaErrors(hipfftPlan2d(&fftPlanInv, fftH, fftW, HIPFFT_C2R));
#ifdef PRINT_KERNEL
fprintf(stderr, "...uploading to GPU and padding convolution kernel and input data\n");
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
checkCudaErrors(hipMemcpy(d_Kernel, h_Kernel, kernelH * kernelW * sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Data, h_Data, dataH * dataW * sizeof(float), hipMemcpyHostToDevice));
sdkStopTimer(&hTimer);
double dataTransferTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
// printf("\tData transfer: %f MPix/s (%f ms)\n", (double)dataH * (double)dataW * 1e-6 / (dataTransferTime * 0.001), dataTransferTime);
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
checkCudaErrors(hipMemset(d_PaddedKernel, 0, fftH * fftW * sizeof(float)));
checkCudaErrors(hipMemset(d_PaddedData, 0, fftH * fftW * sizeof(float)));
padKernel(
d_PaddedKernel,
d_Kernel,
fftH,
fftW,
kernelH,
kernelW,
kernelY,
kernelX
);
padData2D(
pClampToBorder,
d_PaddedData,
d_Data,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX,
-1,
-1
);
sdkStopTimer(&hTimer);
double memsetPaddingTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
// printf("\tMemset and padding: %f MPix/s (%f ms)\n", (double)dataH * (double)dataW * 1e-6 / (memsetPaddingTime * 0.001), memsetPaddingTime);
#endif
//Not including kernel transformation into time measurement,
//since convolution kernel is not changed very frequently
#ifdef PRINT_KERNEL
fprintf(stderr, "...transforming convolution kernel\n");
#endif
double buildKernelTimeTotalGPU = 0;
double buildKernelTimeTotalCPU = 0;
checkCudaErrors(hipDeviceSynchronize());
/********************************************
* Kernel Computation *
********************************************/
// d_KernelSpectrum = FFT(d_PaddedKernel)
checkCudaErrors(hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_PaddedKernel, (hipfftComplex *)d_KernelSpectrum));
// Each iteration convolves the evolving data with the stencil spectrum,
// spreading the kernel's footprint by one stencil radius per pass.
for (int iter = 0; iter < kernelRadius; iter++)
{
#ifdef PRINT_KERNEL
fprintf(stderr, "...running GPU Kernel building iteration %d:\n", iter);
printf("GPU Kernel building iteration %d:\n", iter);
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
checkCudaErrors(hipDeviceSynchronize());
/********************************************
* Convolution *
********************************************/
checkCudaErrors(hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_PaddedData, (hipfftComplex *)d_DataSpectrum));
modulateAndNormalize(d_DataSpectrum, d_KernelSpectrum, fftH, fftW, 1);
checkCudaErrors(hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_DataSpectrum, (hipfftReal *)d_PaddedData));
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
double gpuTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
printf("\t\tGPU computation: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (gpuTime * 0.001), gpuTime);
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
#ifdef PRINT_KERNEL
fprintf(stderr, "...removing results padding\n");
#endif
unpadResult(
d_Data,
d_PaddedData,
dataH,
dataW,
fftH,
fftW
);
sdkStopTimer(&hTimer);
double unpadTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
printf("\t\textract results: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (unpadTime * 0.001), unpadTime);
fprintf(stderr, "...reading back GPU convolution results\n");
#endif
checkCudaErrors(hipMemcpy(h_ResultGPU, d_Data, dataH * dataW * sizeof(float), hipMemcpyDeviceToHost));
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
#ifdef PRINT_KERNEL
fprintf(stderr, "...running reference CPU convolution\n");
#endif
// CPU reference convolution disabled; h_ResultCPU ping-pong retained for
// re-enabling the cross-check below.
//		convolutionClampToBorderCPU(
//				h_ResultCPU[cpu_output],
//				h_ResultCPU[cpu_input],
//				h_Kernel,
//				dataH,
//				dataW,
//				kernelH,
//				kernelW,
//				kernelY,
//				kernelX
//		);
sdkStopTimer(&hTimer);
double cpuTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
printf("\t\tCPU computation: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (cpuTime * 0.001), cpuTime);
#endif
buildKernelTimeTotalGPU += gpuTime;
buildKernelTimeTotalGPU += unpadTime;
buildKernelTimeTotalCPU += cpuTime;
t += dt;
// Update indices for CPU input/output
cpu_input = (cpu_input + 1) % 2;
cpu_output = (cpu_output + 1) % 2;
}
#ifdef PRINT_KERNEL
printf("...extract kernel window from center\n");
#endif
/********************************************************
* Kernel center extraction - FINAL time domain results *
********************************************************/
extractCenter(
d_Window,
d_Data,
dataH,
dataW,
windowH,
windowW
);
#ifdef PRINT_KERNEL
printf("...reading back kernel center from GPU\n");
#endif
checkCudaErrors(hipMemcpy(h_Window, d_Window, windowH * windowW * sizeof(float), hipMemcpyDeviceToHost));
// Debug print of the initial stencil (loop body is empty unless
// PRINT_KERNEL is defined).
for (int i = 0; i < kernelH; i++) {
for (int j = 0; j < kernelW; j++) {
#ifdef PRINT_KERNEL
printf(", %f", h_Kernel[i*kernelW + j]);
#endif
}
#ifdef PRINT_KERNEL
printf("\n");
#endif
}
#ifdef PRINT_KERNEL
printf("\tData transfer: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (dataTransferTime * 0.001), dataTransferTime);
printf("\tMemset and padding: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (memsetPaddingTime * 0.001), memsetPaddingTime);
printf("\tTotal GPU time: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalGPU * 0.001), buildKernelTimeTotalGPU);
printf("\tTotal CPU time: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalCPU * 0.001), buildKernelTimeTotalCPU);
fprintf(stderr, "...comparing the results: ");
#endif
// Update indices for CPU input/output
cpu_input = (cpu_input + 1) % 2;
cpu_output = (cpu_output + 1) % 2;
//	bRetVal = compareResults(h_ResultCPU[cpu_output], h_ResultGPU,
//			dataW, dataH, 1,
//			1e-6);
#ifdef PRINT_KERNEL
fprintf(stderr, "...shutting down\n");
#endif
sdkDeleteTimer(&hTimer);
/********************************************
* Pointer deallocations *
********************************************/
checkCudaErrors(hipfftDestroy(fftPlanInv));
checkCudaErrors(hipfftDestroy(fftPlanFwd));
checkCudaErrors(hipFree(d_DataSpectrum));
checkCudaErrors(hipFree(d_KernelSpectrum));
checkCudaErrors(hipFree(d_PaddedKernel));
checkCudaErrors(hipFree(d_Data));
checkCudaErrors(hipFree(d_Kernel));
checkCudaErrors(hipFree(d_PaddedData));
free(h_ResultCPU[0]);
free(h_ResultCPU[1]);
free(h_ResultGPU);
free(h_Window);
free(h_Data);
free(h_Kernel);
return bRetVal;
}
// Build the frequency-domain representation of the diffusion kernel:
// pads d_Kernel (geometry from chem_cctx) into an FFT-sized buffer and
// transforms it with a single C2C FFT. Real data is packed as complex,
// hence the fftW / 2 plan width. The result is written to d_KernelSpectrum.
// Fixed: removed d_UnpaddedKernel, a device buffer that was allocated and
// freed but never read or written.
bool computeKernelSpectrum(
fComplex *d_KernelSpectrum,
float *d_Kernel,
c_ctx kernel_cctx,
c_ctx chem_cctx
)
{
float *d_PaddedKernel;
hipfftHandle fftPlan;
bool bRetVal = true;
#ifdef PRINT_KERNEL
printf("Testing kernel spectrum computation\n");
fprintf(stderr, "Testing kernel spectrum computation\n");
#endif
// Kernel geometry comes from the chemical context (not kernel_cctx).
const int kernelH = chem_cctx.KH;//kernel_cctx.DH;//7;
const int kernelW = chem_cctx.KW;//kernel_cctx.DW;//6;
const int kernelY = chem_cctx.KY;//kernel_cctx.DH / 2;//3;
const int kernelX = chem_cctx.KX;//kernel_cctx.DW / 2;//4;
const int fftH = chem_cctx.FFTH;
const int fftW = chem_cctx.FFTW;
#ifdef PRINT_KERNEL
printf("\tkernelH: %d\tkernelW: %d\n", kernelH, kernelW);
printf("\tkernelX: %d\tkernelY: %d\n", kernelX, kernelY);
printf("\tfftH: %d\tfftW: %d\n", fftH, fftW);
fprintf(stderr,"...allocating memory\n");
#endif
checkCudaErrors(hipMalloc((void **)&d_PaddedKernel, fftH * fftW * sizeof(float)));
#ifdef PRINT_KERNEL
printf("...creating C2C FFT plan for %i x %i\n", fftH, fftW / 2);
#endif
checkCudaErrors(hipfftPlan2d(&fftPlan, fftH, fftW / 2, HIPFFT_C2C));
#ifdef PRINT_KERNEL
fprintf(stderr,"...uploading to GPU and padding convolution kernel and input data\n");
#endif
// Zero the padded buffer, then scatter the kernel taps into wrap-around
// position for the convolution.
checkCudaErrors(hipMemset(d_PaddedKernel, 0, fftH * fftW * sizeof(float)));
padKernel(
d_PaddedKernel,
d_Kernel,
fftH,
fftW,
kernelH,
kernelW,
kernelY,
kernelX
);
checkCudaErrors(hipDeviceSynchronize());
//HIPFFT_BACKWARD works just as well...
// Must match the direction convention used by spProcess2D in fftDiffuse2D.
const int FFT_DIR = HIPFFT_FORWARD;
#ifdef PRINT_KERNEL
printf("...transforming convolution kernel\n");
#endif
checkCudaErrors(hipfftExecC2C(fftPlan, (hipfftComplex *)d_PaddedKernel, (hipfftComplex *)d_KernelSpectrum, FFT_DIR));
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipfftDestroy(fftPlan));
checkCudaErrors(hipFree(d_PaddedKernel));
return bRetVal;
}
// Perform one 2D diffusion step on d_Data (dataH x dataW, in place) by FFT
// convolution with the precomputed kernel spectrum d_KernelSpectrum0.
// The real field is padded with the constant-value policy (epiBoundary,
// baseChem) into an fftH x fftW buffer, packed as complex for a single C2C
// plan of width fftW / 2, multiplied in the frequency domain via
// spProcess2D, inverse-transformed, and unpadded back into d_Data.
bool fftDiffuse2D(
float *d_Data,
fComplex *d_KernelSpectrum0,
c_ctx cctx,
int epiBoundary,
float baseChem)
{
float
*d_PaddedData;
fComplex
*d_DataSpectrum0;
hipfftHandle
fftPlan;
bool bRetVal = 1;
#ifdef PRINT_KERNEL
printf("Testing GPU chemical diffusion computation\n");
fprintf(stderr,"Testing GPU chemical diffusion computation\n");
#endif
const int kernelH = cctx.KH;
const int kernelW = cctx.KW;
const int kernelY = cctx.KY;
const int kernelX = cctx.KX;
const int dataH = cctx.DH;
const int dataW = cctx.DW;
const int fftH = cctx.FFTH;
const int fftW = cctx.FFTW;
#ifdef PRINT_KERNEL
printf("\tkernelH: %d\tkernelW: %d\n", kernelH, kernelW);
printf("\tkernelX: %d\tkernelY: %d\n", kernelX, kernelY);
// NOTE(review): prints dataH twice; the second argument was presumably
// meant to be dataW.
printf("\tdataH: %d\tdataW: %d\n", dataH, dataH);
printf("\tfftH: %d\tfftW: %d\n", fftH, fftW);
fprintf(stderr,"...allocating memory\n");
#endif
checkCudaErrors(hipMalloc((void **)&d_PaddedData, fftH * fftW * sizeof(float)));
// Packed-complex spectrum: fftH * (fftW / 2) complex values.
checkCudaErrors(hipMalloc((void **)&d_DataSpectrum0, fftH * (fftW / 2) * sizeof(fComplex)));
#ifdef PRINT_KERNEL
printf("...creating C2C FFT plan for %i x %i\n", fftH, fftW / 2);
#endif
checkCudaErrors(hipfftPlan2d(&fftPlan, fftH, fftW / 2, HIPFFT_C2C));
#ifdef PRINT_KERNEL
fprintf(stderr,"...uploading to GPU and padding input data\n");
#endif
checkCudaErrors(hipMemset(d_PaddedData, 0, fftH * fftW * sizeof(float)));
// DEBUG
//	printf("------ before padding data\n");
//	for (int i = 0; i < 2; i++) {
//		for (int j = 0; j < dataW; j++) {
//			printf(" %f ",h_ChemIn[i * dataW + j]);
//		}
//		printf("\n");
//	}
//	printf("\n");
//	float *h_temp = (float *) malloc(fftH * fftH * sizeof(float));
//	checkCudaErrors(hipMemcpy(h_temp, d_PaddedData, dataH * dataW * sizeof(float), hipMemcpyDeviceToHost));
//	printf("------ before padding\n");
//	for (int i = 0; i < 2; i++) {
//		for (int j = 0; j < fftW; j++) {
//			printf(" %f ",h_temp[i * fftW + j]);
//		}
//		printf("\n");
//	}
//	printf("\n");
padData2D(
pConstantVF, // pRightWall, //pMirror, //pClampToBorder,
d_PaddedData,
d_Data,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX,
epiBoundary,
baseChem
// 0.00
);
// DEBUG
//	checkCudaErrors(hipMemcpy(h_temp, d_PaddedData, dataH * dataW * sizeof(float), hipMemcpyDeviceToHost));
//	printf("------ after padding\n");
//	for (int i = 0; i < 2; i++) {
//		for (int j = 0; j < fftW; j++) {
//			printf(" %f ",h_temp[i * fftW + j]);
//		}
//		printf("\n");
//	}
//	free(h_temp);
#ifdef PRINT_KERNEL
fprintf(stderr,"...performing convolution\n");
#endif
// Changed : Added
//HIPFFT_BACKWARD works just as well...
const int FFT_DIR = HIPFFT_FORWARD;
checkCudaErrors(hipDeviceSynchronize());
// --------- Computing convolution ------------ begin
// d_DataSpectrum = FFT(d_PaddedData)
checkCudaErrors(hipfftExecC2C(fftPlan, (hipfftComplex *)d_PaddedData, (hipfftComplex *)d_DataSpectrum0, FFT_DIR));
// d_DataSpectrum = d_DataSpectrum * d_KernelSpectrum
#ifdef PRINT_KERNEL
printf( "fftH: %d\tfftW: %d\n", fftH, fftW);
#endif
spProcess2D(d_DataSpectrum0, d_DataSpectrum0, d_KernelSpectrum0, fftH, fftW / 2, FFT_DIR);
// d_PaddedData = IFFT(d_DataSpectrum)  <------- Output
// (-FFT_DIR is the opposite transform direction, i.e. the inverse FFT.)
checkCudaErrors(hipfftExecC2C(fftPlan, (hipfftComplex *)d_DataSpectrum0, (hipfftComplex *)d_PaddedData, -FFT_DIR));
// --------- Computing convolution ------------ end
checkCudaErrors(hipDeviceSynchronize());
#ifdef PRINT_KERNEL
fprintf(stderr,"...removing results padding\n");
#endif
unpadResult(
d_Data,
d_PaddedData,
dataH,
dataW,
fftH,
fftW
);
#ifdef PRINT_KERNEL
fprintf(stderr,"...reading back GPU convolution results\n");
#endif
checkCudaErrors(hipfftDestroy(fftPlan));
#ifdef PRINT_KERNEL
printf("...freeing device pointers\n");
#endif
checkCudaErrors(hipFree(d_DataSpectrum0));
checkCudaErrors(hipFree(d_PaddedData));
#ifdef PRINT_KERNEL
printf("...returning to main()\n");
#endif
return bRetVal;
}
#else // MODEL_3D
// Sleep for `usec` microseconds, resuming after signal interruptions.
// Fixes two bugs in the original:
//  - the microsecond remainder was multiplied by 1e9 instead of 1e3,
//    producing tv_nsec values far above the POSIX limit of 999,999,999
//    (nanosleep then fails with EINVAL and no sleep happens at all);
//  - the EINTR branch updated `req` from the remaining time but then fell
//    through to an unconditional break, so the sleep was never resumed.
void my_sleep(unsigned usec) {
	struct timespec req, rem;
	req.tv_sec = usec / 1000000;
	req.tv_nsec = (long)(usec % 1000000) * 1000L;  // microseconds -> nanoseconds
	while ((req.tv_sec != 0) || (req.tv_nsec != 0)) {
		if (nanosleep(&req, &rem) == 0)
			break;
		// Interrupted by a signal; resume with the remaining time.
		if (errno == EINTR) {
			req = rem;
			continue;
		}
		// Unhandleable error (EFAULT (bad pointer), EINVAL (bad timeval in tv_nsec), or ENOSYS (function not supported))
		break;
	}
}
/*
 * Query and print the current device's memory usage (used / free / total),
 * reported in GB.
 */
void reportMemUsageGPU()
{
    size_t freeBytes = 0;
    size_t totalBytes = 0;
    checkCudaErrors(hipMemGetInfo(&freeBytes, &totalBytes));

    const double bytesPerGB = 1024.0 * 1024.0 * 1024.0;
    const double freeGB  = (double)freeBytes / bytesPerGB;
    const double totalGB = (double)totalBytes / bytesPerGB;
    const double usedGB  = totalGB - freeGB;

    printf("GPU memory usage: used = %f GB, free = %f GB, total = %f GB\n\n",
           usedGB, freeGB, totalGB);
}
/*
 * Build the diffusion kernel and its frequency-domain spectrum for every
 * chemical (N_CHEM of them), selecting each chemical's assigned device first.
 *
 * kernelRadius  - forwarded to computeKernel3D
 * lambda, gamma - per-chemical diffusion / decay coefficients (arrays indexed by chem)
 * dt            - time step forwarded to computeKernel3D
 * kern_cctx     - kernel-computation grid context (FFT plan dims come from here)
 * chem_cctx     - chemical grid context (device assignment, spectrum storage)
 *
 * Returns the result of the LAST computeKernel3D call (true if N_CHEM == 0).
 */
bool computeKernel3DBatch(
    int kernelRadius,
    float *lambda,
    float *gamma,
    float dt,
    c_ctx kern_cctx,
    c_ctx chem_cctx
)
{
    int *gpu_id = chem_cctx.gpu_id;
    hipfftHandle
        fftPlanFwd,
        fftPlanInv;
    const int fftD = kern_cctx.FFTD;
    const int fftH = kern_cctx.FFTH;
    const int fftW = kern_cctx.FFTW;
    bool bRetVal = true; // FIX: was uninitialized when N_CHEM == 0
    StopWatchInterface *hTimer = NULL;
    sdkCreateTimer(&hTimer);
    double kernelSpectrumComputationTime = 0.0;
    // Compute all kernels
    for (int ic = 0; ic < N_CHEM; ic++){
        int ig = gpu_id[ic]; // logical GPU id (used for the printout only)
        checkCudaErrors(hipSetDevice(chem_cctx.dev_id[ic]));//ig));
        // Create FFT plans — recreated per chemical.
        // TODO: Make plan plans reusable
        printf("...creating R2C & C2R 3D FFT plans for %i x %i x %i\n", fftD, fftH, fftW);
        printf("\tchem %d on device %d\n", ic, ig);
        checkCudaErrors(hipfftPlan3d(&fftPlanFwd, fftD, fftH, fftW, HIPFFT_R2C));
        checkCudaErrors(hipfftPlan3d(&fftPlanInv, fftD, fftH, fftW, HIPFFT_C2R));
        // reportMemUsageGPU();
        /********************************************
         * Kernel Computation                       *
         ********************************************/
        bRetVal = computeKernel3D(
            kernelRadius,
            lambda[ic],
            gamma[ic],
            dt,
            kern_cctx,
            fftPlanFwd,
            fftPlanInv,
            ic
        );
        sdkResetTimer(&hTimer);
        sdkStartTimer(&hTimer);
        /********************************************
         * Kernel Spectrum Computation              *
         ********************************************/
        computeKernelSpectrum3D(
            kern_cctx,
            chem_cctx,
            ic);
        sdkStopTimer(&hTimer);
        kernelSpectrumComputationTime += sdkGetTimerValue(&hTimer);
        // Destroy reusable FFT plans
        checkCudaErrors(hipfftDestroy(fftPlanInv));
        checkCudaErrors(hipfftDestroy(fftPlanFwd));
    }
    printf("\tTotal ker spect computation: %f MPix/s (%f ms)\n",
        (double)chem_cctx.DD * (double)chem_cctx.DH * (double) chem_cctx.DW * 1e-6 /
        (kernelSpectrumComputationTime * 0.001),
        kernelSpectrumComputationTime);
    // /********************************************
    // *        Pointer deallocations             *
    // ********************************************/
    // for (int ig = 0; ig < N_GPU; ig++)
    // {
    // checkCudaErrors(hipFree(kern_cctx.d_data[ig])); // [1] to [N_CHEM-1] is the same as [0]
    // checkCudaErrors(hipFree(kern_cctx.d_kernelspectrum_h[ig]));
    // }
    sdkDeleteTimer(&hTimer); // FIX: timer handle was leaked
    printf("returning from compute kernel batch to main()\n");
    return bRetVal;
}
/*
 * Build the effective 3D diffusion kernel for chemical `ic` on the GPU.
 * A 7-point explicit diffusion stencil is repeatedly self-convolved in the
 * frequency domain (niter passes, each folding in kernelRadius applications
 * via complexPower), then the central windowD x windowH x windowW portion is
 * extracted into cctx.d_data[ic] and copied back to cctx.h_data[ic].
 *
 * kernelRadius   - number of stencil applications folded into each pass
 * lambda         - diffusion coefficient (stencil face-neighbour weight)
 * gamma          - decay constant (reduces the stencil center by gamma*dt)
 * dt             - time step
 * cctx           - kernel-computation context (grid dims, buffers, niter)
 * fftPlanFwd/Inv - caller-owned R2C / C2R plans sized fftD x fftH x fftW
 * ic             - chemical index
 *
 * Returns true (bRetVal is never set false in the current code path).
 */
bool computeKernel3D(
    int kernelRadius,
    float lambda,
    float gamma, // decay constant
    float dt,
    c_ctx cctx,
    hipfftHandle fftPlanFwd,
    hipfftHandle fftPlanInv,
    short int ic)
{
#ifdef COMPUTE_COVERAGE
    int
        cpu_input = 0,
        cpu_output = 1;
#endif // COMPUTE_COVERAGE
    // Host/device window buffers for this chemical (owned by the caller via cctx).
    float *h_Window = cctx.h_data[ic];
    float *d_Window = cctx.d_data[ic];
    fComplex *d_DataSpectrum = cctx.d_kernelspectrum_h[ic]; // Kernel result spectrum (used as scratch here)
    float
        *h_Data,
        *h_Kernel,
        *h_ResultGPU; // NOTE(review): never allocated; referenced only under COMPUTE_COVERAGE
    float
        *d_Data,
        *d_PaddedData,
        *d_Kernel,
        *d_PaddedKernel;
    fComplex
        *d_KernelSpectrum;
    bool bRetVal = true;
    StopWatchInterface *hTimer = NULL;
    sdkCreateTimer(&hTimer);
#ifdef PRINT_KERNEL
    printf("Testing kernel computation\n");
    printf("\tBuilding filter kernel\n");
#endif // PRINT_KERNEL
    const int niter = cctx.niter;
    const int kernelD = cctx.KD;
    const int kernelH = cctx.KH;
    const int kernelW = cctx.KW;
    const int kernelZ = cctx.KZ;
    const int kernelY = cctx.KY;
    const int kernelX = cctx.KX;
    const int dataD = cctx.DD;
    const int dataH = cctx.DH;
    const int dataW = cctx.DW;
    const int outKernelD = cctx.DD;
    const int outKernelH = cctx.DH;
    const int outKernelW = cctx.DW;
    const int fftD = cctx.FFTD;
    const int fftH = cctx.FFTH;
    const int fftW = cctx.FFTW;
    const int windowD = cctx.windowD;
    const int windowH = cctx.windowH;
    const int windowW = cctx.windowW;
    // Element counts and byte sizes.
    // NOTE(review): int can overflow for very large volumes; consider size_t.
    int ksize = kernelD * kernelH * kernelW;
    int dsize = dataD * dataH * dataW;
    int fsize = fftD * fftH * fftW;
    int wsize = windowD * windowH * windowW;
    int ksize_b = ksize * sizeof(float);
    int dsize_b = dsize * sizeof(float);
    int fsize_b = fsize * sizeof(float);
    int wsize_b = wsize * sizeof(float);
#ifdef PRINT_KERNEL
    printf("...allocating memory\n");
#endif // PRINT_KERNEL
    h_Data = (float *)malloc(dsize_b);
    h_Kernel = (float *)malloc(ksize_b);
    checkCudaErrors(hipMalloc((void **)&d_Data, dsize_b));
    checkCudaErrors(hipMalloc((void **)&d_Kernel, ksize_b));
    checkCudaErrors(hipMalloc((void **)&d_PaddedData, fsize_b));
    checkCudaErrors(hipMalloc((void **)&d_PaddedKernel, fsize_b));
    printf("k: %p\n", d_PaddedKernel);
    // R2C spectrum needs only fftW/2+1 complex columns (Hermitian symmetry).
    checkCudaErrors(hipMalloc((void **)&d_KernelSpectrum, fftD * fftH * (fftW / 2 + 1) * sizeof(fComplex)));
    /********************************************
     * Initial kernel initialization            *
     ********************************************/
    printf("...generating 3D %d x %d x %d kernel coefficients\n", kernelD, kernelH, kernelW);
    for (int i = 0; i < kernelD * kernelH * kernelW; i++)
    {
        h_Kernel[i] = 0;
    }
    // Explicit 7-point diffusion stencil centered at (1,1,1): lambda on the
    // six face neighbours, 1 - 6*lambda - gamma*dt at the center.
    int hStride = kernelW;
    int dStride = kernelH * kernelW;
    h_Kernel[0 * dStride + 1 * hStride + 1] = lambda;
    h_Kernel[2 * dStride + 1 * hStride + 1] = lambda;
    h_Kernel[1 * dStride + 0 * hStride + 1] = lambda;
    h_Kernel[1 * dStride + 2 * hStride + 1] = lambda;
    h_Kernel[1 * dStride + 1 * hStride + 0] = lambda;
    h_Kernel[1 * dStride + 1 * hStride + 2] = lambda;
    h_Kernel[1 * dStride + 1 * hStride + 1] = 1 - 6*lambda - gamma*dt;
    for (int i = 0; i < dataD * dataH * dataW; i++)
    {
        h_Data[i] = 0;
    }
    // Copy kernel data to middle block of the input
    int start_k = outKernelD/2 - kernelD/2;
    int end_k = outKernelD/2 + kernelD/2 + 1;
    int start_i = outKernelH/2 - kernelH/2;
    int end_i = outKernelH/2 + kernelH/2 + 1;
    int start_j = outKernelW/2 - kernelW/2;
    int end_j = outKernelW/2 + kernelW/2 + 1;
    int kk = 0, ki = 0, kj = 0;
    int strideD = dataH * dataW;
    int strideH = dataW;
    int kstrideD = kernelH * kernelW;
    int kstrideH = kernelW;
    for (int k = start_k; k < end_k; k++) {
        for (int i = start_i; i < end_i; i++) {
            for (int j = start_j; j < end_j; j++) {
                h_Data [k * strideD + i * strideH + j] = h_Kernel[kk * kstrideD + ki * kstrideH + kj];
                kj++;
            }
            ki++;
            kj = 0;
        }
        kk++;
        ki = 0;
    }
#ifdef CALC_MEM_RQ
    const size_t numGPUs = 1;
    size_t workSizeFwd[numGPUs];
    size_t workSizeInv[numGPUs];
    hipfftGetSize3d(fftPlanFwd, fftD, fftH, fftW, HIPFFT_R2C, workSizeFwd);
    hipfftGetSize3d(fftPlanInv, fftD, fftH, fftW, HIPFFT_C2R, workSizeInv);
    // FIX: %zu for size_t work sizes (was %d).
    printf("Compute kernel forward size %d x %d x %d requires %zu bytes\n", fftW, fftH, fftD, workSizeFwd[0]);
    printf("Compute kernel bckward size %d x %d x %d requires %zu bytes\n", fftW, fftH, fftD, workSizeInv[0]);
    //return true;
#endif
    printf("...uploading to GPU and padding convolution kernel and input data\n");
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    checkCudaErrors(hipMemcpy(d_Kernel, h_Kernel, ksize_b, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_Data, h_Data, dsize_b, hipMemcpyHostToDevice));
    sdkStopTimer(&hTimer);
    double dataTransferTime = sdkGetTimerValue(&hTimer);
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    checkCudaErrors(hipMemset(d_PaddedKernel, 0, fsize_b));
    checkCudaErrors(hipMemset(d_PaddedData, 0, fsize_b));
    double memsetPaddingTime = 0, buildKernelTimeTotalGPU = 0, buildKernelTimeTotalCPU = 0;
    /********************************************
     * Kernel Computation                       *
     ********************************************/
    for (int filter_i = 0; filter_i < niter; filter_i++)
    {
        // Pad stencil and current data volume into the FFT-sized buffers.
        padKernel3D(
            d_PaddedKernel,
            d_Kernel,
            fftD, fftH, fftW,
            kernelD, kernelH, kernelW,
            kernelZ, kernelY, kernelX
        );
        checkCudaErrors(hipDeviceSynchronize());
        padDataClampToBorder3D(
            d_PaddedData,
            d_Data,
            fftD, fftH, fftW,
            dataD, dataH, dataW,
            kernelD, kernelH, kernelW,
            kernelZ, kernelY, kernelX
        );
        sdkStopTimer(&hTimer);
        memsetPaddingTime += sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
        printf("...transforming convolution kernel k: %p\tkspec: %p\n",
            d_PaddedKernel, d_KernelSpectrum);
#endif // PRINT_KERNEL
        buildKernelTimeTotalGPU = 0;
        buildKernelTimeTotalCPU = 0;
        checkCudaErrors(hipDeviceSynchronize());
        // FIX: removed a redundant hipfftExecR2C of d_PaddedKernel that was
        // here; the identical transform is performed right below, before the
        // spectrum is first consumed.
        sdkResetTimer(&hTimer);
        sdkStartTimer(&hTimer);
        checkCudaErrors(hipDeviceSynchronize());
#ifdef PRINT_KERNEL
        printf("HERE------- %p\t%p\n", d_PaddedData, d_DataSpectrum);
#endif // PRINT_KERNEL
        /********************************************
         * Convolution                              *
         ********************************************/
        // d_KernelSpectrum = FFT(d_PaddedKernel); d_DataSpectrum = FFT(d_PaddedData)
        checkCudaErrors(hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_PaddedKernel, (hipfftComplex *)d_KernelSpectrum));
        checkCudaErrors(hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_PaddedData, (hipfftComplex *)d_DataSpectrum));
        // Modulate WITHOUT scaling
        int isFirstIter = (filter_i == 0)? 1 : 0;
        // Raise the kernel spectrum to a power, folding
        // kernelRadius - isFirstIter - 1 extra self-convolutions into one pass.
        complexPower(
            d_KernelSpectrum,
            fftD,
            fftH,
            fftW,
            1,
            kernelRadius - isFirstIter - 1
        );
        // Last iteration: Modulate AND scale
        modulateAndNormalize3D(d_DataSpectrum, d_KernelSpectrum, fftD, fftH, fftW, 1);
        checkCudaErrors(hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_DataSpectrum, (hipfftReal *)d_PaddedData));
        checkCudaErrors(hipDeviceSynchronize());
#ifdef PRINT_KERNEL
        sdkStopTimer(&hTimer);
        double gpuTime = sdkGetTimerValue(&hTimer);
        printf("\t\tGPU computation: %f MPix/s (%f ms)\n",
            (double)dataD * (double)dataH * (double)dataW * 1e-6 / (gpuTime * 0.001), gpuTime);
#endif // PRINT_KERNEL
        sdkResetTimer(&hTimer);
        sdkStartTimer(&hTimer);
        unpadResult3D(
            d_Data,
            d_PaddedData,
            dataD, dataH, dataW,
            fftD, fftH, fftW
        );
#ifdef PRINT_KERNEL
        sdkStopTimer(&hTimer);
        double unpadTime = sdkGetTimerValue(&hTimer);
        printf("\t\tunpad results: %f MPix/s (%f ms)\n",
            (double)dataD * (double)dataH * (double)dataW * 1e-6 / (unpadTime * 0.001), unpadTime);
        printf("...reading back GPU convolution results\n");
#endif // PRINT_KERNEL
    }
    /********************************************************
     * Kernel center extraction - FINAL time domain results *
     ********************************************************/
    printf("...extract kernel window from center\n");
    extractCenter3D(
        d_Window,
        d_Data,
        dataD, dataH, dataW,
        windowD, windowH, windowW
    );
    printf("...reading back kernel center from GPU\n");
    checkCudaErrors(hipMemcpy(h_Window, d_Window, wsize_b, hipMemcpyDeviceToHost));
#ifdef TEST_KERNEL
    for (int k = 0; k < kernelD; k++) {
        for (int i = 0; i < kernelH; i++) {
            for (int j = 0; j < kernelW; j++) {
                // FIX: depth stride is kernelH*kernelW (was kernelH*kernelD).
                printf(", %f", h_Kernel[k*kernelH*kernelW + i*kernelW + j]);
            }
            printf("\n");
        }
    }
    // checkCudaErrors(hipMemcpy(h_Data, d_Data, dsize_b, hipMemcpyDeviceToHost));
    int testW = 7;
    int xbegin = windowW/2 - testW/2;
    int xend = windowW - xbegin;
    int ybegin = windowH/2 - testW/2;
    int yend = windowH - ybegin;
    int zbegin = windowD/2 - testW/2;
    int zend = windowD - zbegin;
    for (int z = zbegin; z < zend; z++)
        for (int y = ybegin; y < yend; y++)
            for (int x = xbegin; x < xend; x++)
            {
                double rGPU = (double)h_Window[z * windowH * windowW +
                    y * windowW + x];
                printf("\t[%d,%d,%d] %.*f\n", x, y, z,
                    OP_DBL_Digs + 6, rGPU);
            }
    // Sanity check: a diffusion kernel must conserve mass (sum ~= 1).
    double ksum = 0.0;
    for (int z = 0; z < windowD; z++)
        for (int y = 0; y < windowH; y++)
            for (int x = 0; x < windowW; x++)
            {
                double rGPU = (double)h_Window[z * windowH * windowW + y * windowW + x];
                ksum += rGPU;
            }
    cout << "Kernel sum: " << ksum << endl;
    double diff = fabs(ksum - 1.0);
    double precision = 0.001;
    if (diff > precision){
        cout << "Error in kernel computation: Incorrect mass " << diff << " > " << precision << endl;
        exit(-2);
    }
    // print out kernel.vtk file
    util::outputDiffusionKernel(h_Window, windowW, windowH, windowD, "output/kernel.vtk");
#endif // TEST_KERNEL
    printf("\tData transfer: %f MPix/s (%f ms)\n",
        (double)dataD * (double)dataH * (double)dataW * 1e-6 / (dataTransferTime * 0.001), dataTransferTime);
    printf("\tMemset and padding: %f MPix/s (%f ms)\n",
        (double)dataD * (double)dataH * (double)dataW * 1e-6 / (memsetPaddingTime * 0.001), memsetPaddingTime);
    printf("\tTotal GPU time: %f MPix/s (%f ms)\n",
        (double)dataD * (double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalGPU * 0.001), buildKernelTimeTotalGPU);
    printf("\tTotal CPU time: %f MPix/s (%f ms)\n",
        (double)dataD * (double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalCPU * 0.001), buildKernelTimeTotalCPU);
#ifdef COMPUTE_COVERAGE
    // NOTE(review): this block references h_ResultCPU and sum, which are not
    // declared in this function -- it cannot compile as-is; kept for reference.
    printf("...comparing the results: ");
    // Update indices for CPU input/output
    cpu_input = (cpu_input + 1) % 2;
    cpu_output = (cpu_output + 1) % 2;
    printf("Results from GPU:\n");
    displayWindowPlane(h_ResultGPU, dataW, dataH, 15, 10);
    printf("Results from CPU:\n");
    displayWindowPlane(h_ResultCPU[cpu_output], dataW, dataH, 15, 10);
    printf("...computing coverage\n");
    double sum_window = 0.0; // FIX: was uninitialized
    for (int z = 0; z < windowD; z++)
        for (int y = 0; y < windowH; y++)
            for (int x = 0; x < windowW; x++)
            {
                double rGPU = (double)h_Window[z * windowH * windowW +
                    y * windowW + x];
                sum_window += rGPU;
            }
    int hlfW = windowW/2;
    int hlfH = windowH/2;
    int hlfD = windowD/2;
    // FIX: a literal percent sign in a format string is %% (was the invalid escape \%).
    printf("\tcoverage:\t\t%lf/%lf\t%lf%%\n", sum_window, sum, (sum_window/sum)*100.0);
    printf("\tzero threshold:\tx:[%.*f, %.*f]\n\t\t\ty:[%.*f, %.*f]\n\t\t\tz:[%.*f, %.*f]\n",
        OP_DBL_Digs, h_Window[hlfD * windowH * windowW + hlfH * windowW + 0],
        OP_DBL_Digs, h_Window[hlfD * windowH * windowW + hlfH * windowW + (windowW - 1)],
        OP_DBL_Digs, h_Window[hlfD * windowH * windowW + 0 * windowW + hlfW],
        OP_DBL_Digs, h_Window[hlfD * windowH * windowW + (windowH - 1) * windowW + hlfW],
        OP_DBL_Digs, h_Window[0 * windowH * windowW + hlfH * windowW + hlfW],
        OP_DBL_Digs, h_Window[(windowD - 1) * windowH * windowW + hlfH * windowW + hlfW]);
#endif // COMPUTE_COVERAGE
    printf("...shutting down\n");
    sdkDeleteTimer(&hTimer);
    /********************************************
     * Pointer deallocations                    *
     ********************************************/
    checkCudaErrors(hipFree(d_Data));
    checkCudaErrors(hipFree(d_Kernel));
    checkCudaErrors(hipFree(d_PaddedData));
    checkCudaErrors(hipFree(d_PaddedKernel));
    // Free after all chems have used this
    checkCudaErrors(hipFree(d_KernelSpectrum));
    free(h_Data);
    free(h_Kernel); // FIX: h_Kernel was leaked
    return bRetVal;
}
/*
 * Recompute the frequency-domain spectrum of an already-built diffusion
 * kernel at the chemical-grid FFT resolution and stash it on the host.
 *
 * Reads the time-domain kernel window from kernel_cctx.d_data[ic], copies it
 * into a zeroed chem-sized fftD x fftH x fftW volume (padKernel3D), runs a
 * forward R2C FFT into chem_cctx.d_kernelspectrum_h[ic], then transfers the
 * spectrum to chem_cctx.h_kernelspectrum[ic] as permanent host-side storage.
 *
 * Creates and destroys its own forward FFT plan; frees only the temporary
 * padded-kernel buffer. Always returns true.
 */
bool computeKernelSpectrum3D(
    c_ctx kernel_cctx,
    c_ctx chem_cctx,
    short int ic)
{
    float *d_Kernel = kernel_cctx.d_data[ic]; // time-domain kernel window (device)
    fComplex *d_KernelSpectrum = chem_cctx.d_kernelspectrum_h[ic];
    fComplex *h_KernelSpectrum = chem_cctx.h_kernelspectrum[ic]; // permanent storage for padded spectrum on host
    float
        *d_PaddedKernel;
    hipfftHandle
        fftPlanFwd;
    bool bRetVal = true;
    printf("Testing kernel spectrum computation\n");
    // Kernel window and FFT volume geometry, all taken from the chem context.
    const int kernelD = chem_cctx.KD;
    const int kernelH = chem_cctx.KH;//kernel_cctx.DH;//7;
    const int kernelW = chem_cctx.KW;//kernel_cctx.DW;//6;
    const int kernelZ = chem_cctx.KZ;
    const int kernelY = chem_cctx.KY;//kernel_cctx.DH / 2;//3;
    const int kernelX = chem_cctx.KX;//kernel_cctx.DW / 2;//4;
    const int fftD = chem_cctx.FFTD;
    const int fftH = chem_cctx.FFTH;
    const int fftW = chem_cctx.FFTW;
    // Real padded volume vs. R2C spectrum: the spectrum needs only
    // fftW/2+1 complex columns (Hermitian symmetry).
    size_t fsize_b = fftD * fftH * fftW * sizeof(float);
    size_t fssize_b = fftD * fftH * (fftW / 2 + 1) * sizeof(fComplex);
    // size_t ksize_b = kernelD * kernelH * kernelW * sizeof(float);
    printf("\tkernelD: %d\tkernelH: %d\tkernelW: %d\n", kernelD, kernelH, kernelW);
    printf("\tkernelX: %d\tkernelY: %d\tkernelZ: %d\n", kernelX, kernelY, kernelZ);
    printf("\tfftD: %d\tfftH: %d\tfftW: %d\n", fftD, fftH, fftW);
    printf("...allocating memory\n");
    checkCudaErrors(hipMalloc((void **)&d_PaddedKernel, fsize_b));
    printf("...creating R2C FFT plans for %i x %i x %i\n", fftD, fftH, fftW);
    checkCudaErrors(hipfftPlan3d(&fftPlanFwd, fftD, fftH, fftW, HIPFFT_R2C));
    printf("...uploading to GPU and padding convolution kernel and input data\n");
    checkCudaErrors(hipMemset(d_PaddedKernel, 0, fsize_b));
    padKernel3D(
        d_PaddedKernel,
        d_Kernel,
        fftD,
        fftH,
        fftW,
        kernelD,
        kernelH,
        kernelW,
        kernelZ,
        kernelY,
        kernelX
    );
    checkCudaErrors(hipDeviceSynchronize());
    printf("...transforming convolution kernel\n");
    // d_KernelSpectrum = FFT(d_PaddedKernel)
    checkCudaErrors(hipfftExecR2C(fftPlanFwd, (hipfftReal *)d_PaddedKernel, (hipfftComplex *)d_KernelSpectrum));
    checkCudaErrors(hipDeviceSynchronize());
    // Transfer data from device to host (permanent storage)
    checkCudaErrors(hipMemcpy(h_KernelSpectrum, d_KernelSpectrum, fssize_b, hipMemcpyDeviceToHost));
    checkCudaErrors(hipfftDestroy(fftPlanFwd));
    //checkCudaErrors(hipFree(d_Kernel)); // Not used after spectrum is calculated
    checkCudaErrors(hipFree(d_PaddedKernel));
    return bRetVal;
}
/*
 * One FFT-based 3D diffusion step for a single chemical grid, performed in
 * place on d_Data using a precomputed kernel spectrum:
 *   d_Data <- unpad( IFFT( FFT(pad(d_Data)) combined with d_KernelSpectrum ) )
 *
 * d_Data           - dataD x dataH x dataW chemical grid (device), updated in place
 * d_KernelSpectrum - precomputed kernel spectrum at FFT resolution (device)
 * fftPlanFwd/Inv   - caller-owned R2C / C2R plans for fftD x fftH x fftW
 * cctx             - geometry context (kernel/data/FFT dimensions)
 * epiBoundary, baseChem - forwarded to the padDataConstantVF3D boundary fill
 *
 * Always returns true; each stage's wall time is printed at the end.
 */
bool fftDiffuse3D(
    float *d_Data,
    fComplex *d_KernelSpectrum,
    hipfftHandle fftPlanFwd,
    hipfftHandle fftPlanInv,
    c_ctx cctx,
    int epiBoundary,
    float baseChem)
{
    int devID;
    StopWatchInterface *hTimer = NULL;
    sdkCreateTimer(&hTimer);
    hipGetDevice(&devID); // device id is only used to label the timing printouts
    float
        *d_PaddedData;
    fComplex
        *d_DataSpectrum;
    /* hipfftHandle
    fftPlanFwd,
    fftPlanInv;
    */
    bool bRetVal = true;
#ifdef PRINT_KERNEL
    printf("Testing GPU chemical diffusion computation\n");
#endif // PRINT_KERNEL
    const int kernelD = cctx.KD;
    const int kernelH = cctx.KH;
    const int kernelW = cctx.KW;
    const int kernelZ = cctx.KZ;
    const int kernelY = cctx.KY;
    const int kernelX = cctx.KX;
    const int dataD = cctx.DD;
    const int dataH = cctx.DH;
    const int dataW = cctx.DW;
    const int fftD = cctx.FFTD;
    const int fftH = cctx.FFTH;
    const int fftW = cctx.FFTW;
    // NOTE(review): ksize_b and dsize_b are computed but unused here.
    int ksize = kernelD * kernelH * kernelW;
    int dsize = dataD * dataH * dataW;
    int fsize = fftD * fftH * fftW;
    int ksize_b = ksize * sizeof(float);
    int dsize_b = dsize * sizeof(float);
    int fsize_b = fsize * sizeof(float);
#ifdef PRINT_KERNEL
    printf("\tkernelD: %d\tkernelH: %d\tkernelW: %d\n", kernelD, kernelH, kernelW);
    printf("\tkernelX: %d\tkernelY: %d\tkernelZ: %d\n", kernelX, kernelY, kernelZ);
    printf("\tdataD: %d\tdataH: %d\tdataW: %d\n", dataD, dataH, dataW);
    printf("\tfftD: %d\tfftH: %d\tfftW: %d\n", fftD, fftH, fftW);
    printf("...allocating memory ------\n");
#endif // PRINT_KERNEL
    // Per-call scratch: padded real volume and its R2C spectrum
    // (only fftW/2+1 complex columns are needed -- Hermitian symmetry).
    checkCudaErrors(hipMalloc((void **)&d_PaddedData, fsize_b));
    checkCudaErrors(hipMalloc((void **)&d_DataSpectrum, fftD * fftH * (fftW / 2 + 1) * sizeof(fComplex)));
#ifdef CALC_MEM_RQ
    const size_t numGPUs = 1;
    size_t workSizeFwd[numGPUs];
    size_t workSizeInv[numGPUs];
    checkCudaErrors(hipfftEstimate3d(fftD, fftH, fftW, HIPFFT_R2C, workSizeFwd));
    checkCudaErrors(hipfftEstimate3d(fftD, fftH, fftW, HIPFFT_C2R, workSizeInv));
    // NOTE(review): work sizes are size_t; %d should be %zu.
    printf("Compute chem forward size %d x %d x %d requires %d bytes\n", fftW, fftH, fftD, workSizeFwd[0]);
    printf("Compute chem bckward size %d x %d x %d requires %d bytes\n", fftW, fftH, fftD, workSizeInv[0]);
    //return true;
#endif
#ifdef PRINT_KERNEL
    printf("...creating R2C & C2R FFT plans for %i x %i x %i\n", fftD, fftH, fftW);
#endif //PRINT_KERNEL
    // Plans are created once by the caller and reused across diffusion steps.
    // checkCudaErrors(hipfftPlan3d(&fftPlanFwd, fftD, fftH, fftW, HIPFFT_R2C));
    // checkCudaErrors(hipfftPlan3d(&fftPlanInv, fftD, fftH, fftW, HIPFFT_C2R));
#ifdef CALC_MEM_RQ
    hipfftGetSize3d(fftPlanFwd, fftD, fftH, fftW, HIPFFT_R2C, workSizeFwd);
    hipfftGetSize3d(fftPlanInv, fftD, fftH, fftW, HIPFFT_C2R, workSizeInv);
    printf("Compute kernel forward size %d x %d x %d requires %f GB\n", fftW, fftH, fftD,
        (float) workSizeFwd[0]/(1024.0*1024.0*1024.0));
    printf("Compute kernel bckward size %d x %d x %d requires %f GB\n", fftW, fftH, fftD,
        (float) workSizeInv[0]/(1024.0*1024.0*1024.0));
    //return true;
#endif
    // Global flag -- presumably consumed by first-transfer logic elsewhere; TODO confirm.
    firstTransferCompleted = true;
    checkCudaErrors(hipMemset(d_PaddedData, 0, fsize_b));
    /********************************************
     * Pad data                                 *
     ********************************************/
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    // Previous boundary policy, kept for reference:
    // padDataClampToBorder3D(
    // d_PaddedData,
    // d_Data,
    // fftD,
    // fftH,
    // fftW,
    // dataD,
    // dataH,
    // dataW,
    // kernelD,
    // kernelH,
    // kernelW,
    // kernelZ,
    // kernelY,
    // kernelX
    // );
    padDataConstantVF3D(
        d_PaddedData,
        d_Data,
        fftD,
        fftH,
        fftW,
        dataD,
        dataH,
        dataW,
        kernelD,
        kernelH,
        kernelW,
        kernelZ,
        kernelY,
        kernelX,
        epiBoundary,
        baseChem
    );
#ifdef PRINT_KERNEL
    printf("...performing convolution\n");
#endif // PRINT_KERNEL
    checkCudaErrors(hipDeviceSynchronize());
    sdkStopTimer(&hTimer);
    double padTime = sdkGetTimerValue(&hTimer);
    // --------- Computing convolution ------------ begin
    /********************************************
     * Compute FFT{data}                        *
     ********************************************/
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    // d_DataSpectrum = FFT{d_PaddedData}
    checkCudaErrors(hipfftExecR2C(fftPlanFwd,
        (hipfftReal *)d_PaddedData, (hipfftComplex *)d_DataSpectrum));
    checkCudaErrors(hipDeviceSynchronize());
    sdkStopTimer(&hTimer);
    double fftTime = sdkGetTimerValue(&hTimer);
    /********************************************
     * Spectrum Point-wise Multiplication       *
     ********************************************/
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    // d_DataSpectrum = d_DataSpectrum * d_KernelSpectrum
    modulateAndNormalize3D(d_DataSpectrum, d_KernelSpectrum, fftD, fftH, fftW, 1);
    checkCudaErrors(hipDeviceSynchronize());
    sdkStopTimer(&hTimer);
    double multTime = sdkGetTimerValue(&hTimer);
    /********************************************
     * Compute IFFT{data_spectrum}              *
     ********************************************/
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    // d_PaddedData = IFFT{d_DataSpectrum}
    checkCudaErrors(hipfftExecC2R(fftPlanInv, (hipfftComplex *)d_DataSpectrum, (hipfftReal *)d_PaddedData));
    checkCudaErrors(hipDeviceSynchronize());
    sdkStopTimer(&hTimer);
    double ifftTime = sdkGetTimerValue(&hTimer);
    // --------- Computing convolution ------------ end
    /********************************************
     * Unpad results                            *
     ********************************************/
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
#ifdef PRINT_KERNEL
    printf("...removing result padding\n");
#endif // PRINT_KERNEL
    unpadResult3D(
        d_Data,
        d_PaddedData,
        dataD,
        dataH,
        dataW,
        fftD,
        fftH,
        fftW
    );
    checkCudaErrors(hipDeviceSynchronize());
    sdkStopTimer(&hTimer);
    double unpadTime = sdkGetTimerValue(&hTimer);
    /********************************************
     * Execution Time Display                   *
     ********************************************/
    printf("\t\t\t\t[%d] pad took: %f ms\n", devID, padTime);
    printf("\t\t\t\t[%d] FFT took: %f ms\n", devID, fftTime);
    printf("\t\t\t\t[%d] MULT took: %f ms\n", devID, multTime);
    printf("\t\t\t\t[%d] IFFT took: %f ms\n", devID, ifftTime);
    printf("\t\t\t\t[%d] unpad took: %f ms\n", devID, unpadTime);
    /********************************************
     * Deallocation of plans and memory         *
     ********************************************/
    sdkDeleteTimer(&hTimer);
    // checkCudaErrors(hipfftDestroy(fftPlanInv));
    // checkCudaErrors(hipfftDestroy(fftPlanFwd));
#ifdef PRINT_KERNEL
    printf("...freeing device pointers\n");
#endif // PRINT_KERNEL
    checkCudaErrors(hipFree(d_PaddedData));
    checkCudaErrors(hipFree(d_DataSpectrum));
#ifdef PRINT_KERNEL
    printf("...returning to main()\n");
#endif // PRINT_KERNEL
    return bRetVal;
}
#endif // MODEL_3D
#endif // GPU_DIFFUSE (*)
| 9471faed0a0f2f5b37f7354cbe9daf96d6535f7e.cu | /*
* Diffusion.cpp
*
* Created on: May 18, 2016
* Author: NungnunG
*/
#include "../common.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
// Include CUDA runtime and CUFFT
#include <cuda_runtime.h>
#include <cufft.h>
// Helper functions for CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "../Utilities/output_utils.h"
#include "convolutionFFT_common.h"
//#include "convolutionFFT2D.cu"
using namespace std;
#ifdef GPU_DIFFUSE // (*)
bool firstTransferCompleted = false;
////////////////////////////////////////////////////////////////////////////////
//// Helper functions
//////////////////////////////////////////////////////////////////////////////////
/*
 * Round a transform dimension up to an FFT-friendly size: the next power of
 * two >= the 16-aligned input. Sizes up to 1024 use bit arithmetic; larger
 * sizes go through the floating-point pow/log path (original behavior kept).
 */
int snapTransformSize(int dataSize)
{
    dataSize = iAlignUp(dataSize, 16);

    // Locate the highest set bit.
    int topBit = 31;
    while (topBit >= 0 && !(dataSize & (1U << topBit)))
        --topBit;

    const unsigned int lowPOT = 1U << topBit;
    if (lowPOT == (unsigned int)dataSize)
    {
        return dataSize; // already an exact power of two
    }

    const unsigned int hiPOT = 1U << (topBit + 1);
    if (hiPOT <= 1024)
    {
        return hiPOT;
    }
    //return iAlignUp(dataSize, 512);
    return (int)pow(2, ceil(log(dataSize)/log(2)));
}
/*
 * Stage chemical `ic`'s input grid (np floats) from the host buffer
 * chem_cctx->h_ibuffs[ic] to its device buffer chem_cctx->d_data[ic].
 * Under MODEL_3D the chemical's device is selected first, and (unless M40 is
 * defined) the kernel spectrum for that chemical is also re-uploaded.
 */
void H2D(int ic, c_ctx* chem_cctx, int np)
{
#ifdef MODEL_3D
    // `ig` is only consumed by the ASYNCCPY path below (print + stream index).
    int ig = chem_cctx->gpu_id[ic];
    cudaSetDevice(chem_cctx->dev_id[ic]);//ig);
#ifndef M40
    const int fftD = chem_cctx->FFTD;
    const int fftH = chem_cctx->FFTH;
    const int fftW = chem_cctx->FFTW;
    // R2C spectrum byte size: fftW/2+1 complex columns (Hermitian symmetry).
    size_t fssize_b = fftD * fftH * (fftW / 2 + 1) * sizeof(fComplex);
    // size_t fsize_b = fftD * fftH * fftW * sizeof(float);
    // Copy Kernel Spectrum from Host to Device
    checkCudaErrors(cudaMemcpy(chem_cctx->d_kernelspectrum_h[ic], chem_cctx->h_kernelspectrum[ic],
        fssize_b, cudaMemcpyHostToDevice));
#endif // M40
#endif // MODEL_3D
    float **h_ibuffs = chem_cctx->h_ibuffs;
#ifdef ASYNCCPY
    // NOTE(review): this path uses `ig` (declared only under MODEL_3D) and a
    // file-scope `stream[]` array -- confirm both exist in this build config.
    printf("async P2D copy: gpu[%d] chem [%d]\n", ig, ic);
    checkCudaErrors(cudaMemcpyAsync(chem_cctx->d_data[ic], h_ibuffs[ic], np*sizeof(float),
        cudaMemcpyHostToDevice, stream[ig]));
#else // ASYNCCPY
    checkCudaErrors(cudaMemcpy(chem_cctx->d_data[ic], h_ibuffs[ic],
        np*sizeof(float), cudaMemcpyHostToDevice));
#endif // ASYNCCPY
}
/*
 * Copy chemical `ic`'s grid (np floats) from its device buffer
 * chem_cctx->d_data[ic] back to the host output staging buffer
 * chem_cctx->h_obuffs[ic]. Under MODEL_3D the chemical's device is
 * selected first.
 */
void D2H(int ic, c_ctx* chem_cctx, int np)
{
#ifdef MODEL_3D
    int ig = chem_cctx->gpu_id[ic];
    cudaSetDevice(chem_cctx->dev_id[ic]);//ig);
#endif // MODEL_3D
    float **h_obuffs = chem_cctx->h_obuffs;
#ifdef ASYNCCPY
    // FIX: the debug print previously passed the undeclared variable `i`;
    // use `ig`, matching H2D. NOTE(review): `ig` and the file-scope `stream[]`
    // are only available under MODEL_3D / in this TU -- confirm build flags.
    printf("async D2P copy: gpu[%d] chem [%d]\n", ig, ic);
    checkCudaErrors(cudaMemcpyAsync(h_obuffs[ic], chem_cctx->d_data[ic], np*sizeof(float),
        cudaMemcpyDeviceToHost, stream[ig]));
#else // ASYNCCPY
    checkCudaErrors(cudaMemcpy(h_obuffs[ic], chem_cctx->d_data[ic],
        np*sizeof(float), cudaMemcpyDeviceToHost));
#endif // ASYNCCPY
}
/*
 * Compare CPU and GPU result volumes via a relative L2 norm and report the
 * worst per-element delta/reference ratio. Returns true when the L2 norm is
 * below eThreshold.
 *
 * NOTE(review): the reference term is rCPU^2 + rCPU^2 (the GPU value is not
 * included) -- preserved as-is; confirm whether rGPU^2 was intended.
 */
bool compareResults(float *h_ResultCPU, float *h_ResultGPU,
                    int dataW, int dataH, int dataD, float eThreshold)
{
    const long nTotal = (long)dataD * dataH * dataW;
    double deltaSq = 0.0;     // sum of squared CPU-GPU differences
    double refSq = 0.0;       // sum of reference magnitudes
    double worstRatio = 0.0;  // max per-element delta/reference
    double gpuSum = 0.0;      // accumulated (currently unreported) GPU total

    // Row-major linear walk == the original z/y/x nesting, so float
    // accumulation order is identical.
    for (long idx = 0; idx < nTotal; idx++)
    {
        const double cpuVal = (double)h_ResultCPU[idx];
        const double gpuVal = (double)h_ResultGPU[idx];
        const double d2 = (cpuVal - gpuVal) * (cpuVal - gpuVal);
        const double r2 = cpuVal * cpuVal + cpuVal * cpuVal;
        if ((d2 / r2) > worstRatio)
        {
            worstRatio = d2 / r2;
        }
        deltaSq += d2;
        refSq += r2;
        gpuSum += gpuVal;
    }

    const double l2 = sqrt(deltaSq / refSq);
    printf("rel L2 = %E (max delta = %E)\n", l2, sqrt(worstRatio));
    const bool ok = (l2 < eThreshold);
    printf(ok ? "L2norm Error OK\n" : "L2norm Error too high!\n");
    return ok;
}
#ifndef MODEL_3D
/*
bool computeChemDiffusionCPU(
float *h_ChemOut,
float *h_ChemIn,
float *h_Kernel,
c_ctx cctx,
int iter)
{
bool bRetVal = 1;
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
printf("Testing Chemical Diffusion CPU\n");
const int kernelH = cctx.KH;//7;
const int kernelW = cctx.KW;//6;
const int kernelY = cctx.KY;//3;
const int kernelX = cctx.KX;//4;
const int dataH = cctx.DH;//100;//1160;//2000;
const int dataW = cctx.DW;//100;//1660;//2000;
const int outKernelH = cctx.DH;
const int outKernelW = cctx.DW;
const int fftH = cctx.FFTH;
const int fftW = cctx.FFTW;
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
fprintf(stderr,"...running reference CPU convolution\n");
convolutionClampToBorderCPU(
h_ChemOut,
h_ChemIn,
h_Kernel,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX
);
sdkStopTimer(&hTimer);
double cpuTime = sdkGetTimerValue(&hTimer);
printf("\t\tCPU chemical diffusion computation:\t%f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (cpuTime * 0.001), cpuTime);
sdkDeleteTimer(&hTimer);
return bRetVal;
}
*/
/*
 * Dispatch to the 2D padding routine selected by the boundary policy `pt`.
 * All variants share the same geometry arguments; only pConstantVF also
 * consumes epiBoundary and baseChem. Unknown policies are a no-op.
 */
void padData2D(
    pad_t pt,
    float *d_PaddedData,
    float *d_Data,
    int fftH,
    int fftW,
    int dataH,
    int dataW,
    int kernelH,
    int kernelW,
    int kernelY,
    int kernelX,
    int epiBoundary,
    float baseChem)
{
    if (pt == pClampToBorder)
    {
        padDataClampToBorder(d_PaddedData, d_Data, fftH, fftW, dataH, dataW,
                             kernelH, kernelW, kernelY, kernelX);
    }
    else if (pt == pRightWall)
    {
        padDataRightWall(d_PaddedData, d_Data, fftH, fftW, dataH, dataW,
                         kernelH, kernelW, kernelY, kernelX);
    }
    else if (pt == pMirror)
    {
        padDataMirror(d_PaddedData, d_Data, fftH, fftW, dataH, dataW,
                      kernelH, kernelW, kernelY, kernelX);
    }
    else if (pt == pConstantVF)
    {
        padDataConstantVF(d_PaddedData, d_Data, fftH, fftW, dataH, dataW,
                          kernelH, kernelW, kernelY, kernelX,
                          epiBoundary, baseChem);
    }
}
bool computeKernel(
float *d_Window,
int kernelRadius,
float lambda,
float gamma, // decay constant
float dt,
c_ctx cctx)
{
/********************************************
* Declarations and allocations *
********************************************/
float t = 0.0;
int
cpu_input = 0,
cpu_output = 1;
float
*h_Data,
*h_Kernel,
*h_Window,
*h_ResultGPU,
*h_ResultCPU[2];
float
*d_Data,
*d_PaddedData,
*d_Kernel,
*d_PaddedKernel;
fComplex
*d_DataSpectrum,
*d_KernelSpectrum;
cufftHandle
fftPlanFwd,
fftPlanInv;
bool bRetVal = true;
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
#ifdef PRINT_KERNEL
fprintf(stderr, "Testing kernel computation\n");
printf("Testing kernel computation\n");
fprintf(stderr, "\tBuilding filter kernel\n");
#endif
const int kernelH = cctx.KH;//7;
const int kernelW = cctx.KW;//6;
const int kernelY = cctx.KY;//3;
const int kernelX = cctx.KX;//4;
const int dataH = cctx.DH;//100;//1160;//2000;
const int dataW = cctx.DW;//100;//1660;//2000;
const int outKernelH = cctx.DH;
const int outKernelW = cctx.DW;
const int fftH = cctx.FFTH;
const int fftW = cctx.FFTW;
// Changed 2
const int windowH = cctx.windowH;
const int windowW = cctx.windowW;
#ifdef PRINT_KERNEL
fprintf(stderr, "...allocating memory\n");
#endif
h_Data = (float *)malloc(dataH * dataW * sizeof(float));
h_Kernel = (float *)malloc(kernelH * kernelW * sizeof(float));
h_Window = (float *)malloc(windowH * windowW * sizeof(float));
h_ResultGPU = (float *)malloc(dataH * dataW * sizeof(float));
h_ResultCPU[cpu_input] = (float *)malloc(dataH * dataW * sizeof(float));
h_ResultCPU[cpu_output] = (float *)malloc(dataH * dataW * sizeof(float));
checkCudaErrors(cudaMalloc((void **)&d_Data, dataH * dataW * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Kernel, kernelH * kernelW * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_PaddedData, fftH * fftW * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_PaddedKernel, fftH * fftW * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_DataSpectrum, fftH * (fftW / 2 + 1) * sizeof(fComplex)));
checkCudaErrors(cudaMalloc((void **)&d_KernelSpectrum, fftH * (fftW / 2 + 1) * sizeof(fComplex)));
#ifdef PRINT_KERNEL
fprintf(stderr, "...generating 2D %d x %d kernel coefficients\n", kernelH, kernelW);
#endif
/********************************************
* Initial kernel initialization *
********************************************/
for (int i = 0; i < kernelH * kernelW; i++)
{
h_Kernel[i] = 0;
}
h_Kernel[0 * kernelW + 1] = lambda;
h_Kernel[1 * kernelW + 0] = lambda;
h_Kernel[1 * kernelW + 2] = lambda;
h_Kernel[2 * kernelW + 1] = lambda;
h_Kernel[1 * kernelW + 1] = 1 - 4*lambda - gamma*dt;
for (int i = 0; i < dataH * dataW; i++)
{
h_Data[i] = 0;
h_ResultCPU[cpu_input][i] = 0;
}
// Copy kernel data to middle block of the input
int start_i = outKernelH/2 - kernelH/2;
int end_i = outKernelH/2 + kernelH/2 + 1;
int start_j = outKernelW/2 - kernelW/2;
int end_j = outKernelW/2 + kernelW/2 + 1;
int ki = 0, kj = 0;
for (int i = start_i; i < end_i; i++) {
for (int j = start_j; j < end_j; j++) {
h_Data [i * dataW + j] = h_Kernel[ki * kernelW + kj];
h_ResultCPU [cpu_input] [i * dataW + j] = h_Kernel[ki * kernelW + kj];
#ifdef PRINT_KERNEL
printf("%d,%d -> %d,%d\n", ki, kj, i, j);
#endif
kj++;
}
ki++;
kj = 0;
}
#ifdef PRINT_KERNEL
fprintf(stderr, "...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
#endif
checkCudaErrors(cufftPlan2d(&fftPlanFwd, fftH, fftW, CUFFT_R2C));
checkCudaErrors(cufftPlan2d(&fftPlanInv, fftH, fftW, CUFFT_C2R));
#ifdef PRINT_KERNEL
fprintf(stderr, "...uploading to GPU and padding convolution kernel and input data\n");
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
checkCudaErrors(cudaMemcpy(d_Kernel, h_Kernel, kernelH * kernelW * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Data, h_Data, dataH * dataW * sizeof(float), cudaMemcpyHostToDevice));
sdkStopTimer(&hTimer);
double dataTransferTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
// printf("\tData transfer: %f MPix/s (%f ms)\n", (double)dataH * (double)dataW * 1e-6 / (dataTransferTime * 0.001), dataTransferTime);
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
checkCudaErrors(cudaMemset(d_PaddedKernel, 0, fftH * fftW * sizeof(float)));
checkCudaErrors(cudaMemset(d_PaddedData, 0, fftH * fftW * sizeof(float)));
padKernel(
d_PaddedKernel,
d_Kernel,
fftH,
fftW,
kernelH,
kernelW,
kernelY,
kernelX
);
padData2D(
pClampToBorder,
d_PaddedData,
d_Data,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX,
-1,
-1
);
sdkStopTimer(&hTimer);
double memsetPaddingTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
// printf("\tMemset and padding: %f MPix/s (%f ms)\n", (double)dataH * (double)dataW * 1e-6 / (memsetPaddingTime * 0.001), memsetPaddingTime);
#endif
//Not including kernel transformation into time measurement,
//since convolution kernel is not changed very frequently
#ifdef PRINT_KERNEL
fprintf(stderr, "...transforming convolution kernel\n");
#endif
double buildKernelTimeTotalGPU = 0;
double buildKernelTimeTotalCPU = 0;
checkCudaErrors(cudaDeviceSynchronize());
/********************************************
* Kernel Computation *
********************************************/
// d_KernelSpectrum = FFT(d_PaddedKernel)
checkCudaErrors(cufftExecR2C(fftPlanFwd, (cufftReal *)d_PaddedKernel, (cufftComplex *)d_KernelSpectrum));
for (int iter = 0; iter < kernelRadius; iter++)
{
#ifdef PRINT_KERNEL
fprintf(stderr, "...running GPU Kernel building iteration %d:\n", iter);
printf("GPU Kernel building iteration %d:\n", iter);
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
checkCudaErrors(cudaDeviceSynchronize());
/********************************************
* Convolution *
********************************************/
checkCudaErrors(cufftExecR2C(fftPlanFwd, (cufftReal *)d_PaddedData, (cufftComplex *)d_DataSpectrum));
modulateAndNormalize(d_DataSpectrum, d_KernelSpectrum, fftH, fftW, 1);
checkCudaErrors(cufftExecC2R(fftPlanInv, (cufftComplex *)d_DataSpectrum, (cufftReal *)d_PaddedData));
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
double gpuTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
printf("\t\tGPU computation: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (gpuTime * 0.001), gpuTime);
#endif
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
#ifdef PRINT_KERNEL
fprintf(stderr, "...removing results padding\n");
#endif
unpadResult(
d_Data,
d_PaddedData,
dataH,
dataW,
fftH,
fftW
);
sdkStopTimer(&hTimer);
double unpadTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
printf("\t\textract results: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (unpadTime * 0.001), unpadTime);
fprintf(stderr, "...reading back GPU convolution results\n");
#endif
checkCudaErrors(cudaMemcpy(h_ResultGPU, d_Data, dataH * dataW * sizeof(float), cudaMemcpyDeviceToHost));
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
#ifdef PRINT_KERNEL
fprintf(stderr, "...running reference CPU convolution\n");
#endif
// convolutionClampToBorderCPU(
// h_ResultCPU[cpu_output],
// h_ResultCPU[cpu_input],
// h_Kernel,
// dataH,
// dataW,
// kernelH,
// kernelW,
// kernelY,
// kernelX
// );
sdkStopTimer(&hTimer);
double cpuTime = sdkGetTimerValue(&hTimer);
#ifdef PRINT_KERNEL
printf("\t\tCPU computation: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (cpuTime * 0.001), cpuTime);
#endif
buildKernelTimeTotalGPU += gpuTime;
buildKernelTimeTotalGPU += unpadTime;
buildKernelTimeTotalCPU += cpuTime;
t += dt;
// Update indices for CPU input/output
cpu_input = (cpu_input + 1) % 2;
cpu_output = (cpu_output + 1) % 2;
}
#ifdef PRINT_KERNEL
printf("...extract kernel window from center\n");
#endif
/********************************************************
* Kernel center extraction - FINAL time domain results *
********************************************************/
extractCenter(
d_Window,
d_Data,
dataH,
dataW,
windowH,
windowW
);
#ifdef PRINT_KERNEL
printf("...reading back kernel center from GPU\n");
#endif
checkCudaErrors(cudaMemcpy(h_Window, d_Window, windowH * windowW * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < kernelH; i++) {
for (int j = 0; j < kernelW; j++) {
#ifdef PRINT_KERNEL
printf(", %f", h_Kernel[i*kernelW + j]);
#endif
}
#ifdef PRINT_KERNEL
printf("\n");
#endif
}
#ifdef PRINT_KERNEL
printf("\tData transfer: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (dataTransferTime * 0.001), dataTransferTime);
printf("\tMemset and padding: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (memsetPaddingTime * 0.001), memsetPaddingTime);
printf("\tTotal GPU time: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalGPU * 0.001), buildKernelTimeTotalGPU);
printf("\tTotal CPU time: %f MPix/s (%f ms)\n",
(double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalCPU * 0.001), buildKernelTimeTotalCPU);
fprintf(stderr, "...comparing the results: ");
#endif
// Update indices for CPU input/output
cpu_input = (cpu_input + 1) % 2;
cpu_output = (cpu_output + 1) % 2;
// bRetVal = compareResults(h_ResultCPU[cpu_output], h_ResultGPU,
// dataW, dataH, 1,
// 1e-6);
#ifdef PRINT_KERNEL
fprintf(stderr, "...shutting down\n");
#endif
sdkDeleteTimer(&hTimer);
/********************************************
* Pointer deallocations *
********************************************/
checkCudaErrors(cufftDestroy(fftPlanInv));
checkCudaErrors(cufftDestroy(fftPlanFwd));
checkCudaErrors(cudaFree(d_DataSpectrum));
checkCudaErrors(cudaFree(d_KernelSpectrum));
checkCudaErrors(cudaFree(d_PaddedKernel));
checkCudaErrors(cudaFree(d_Data));
checkCudaErrors(cudaFree(d_Kernel));
checkCudaErrors(cudaFree(d_PaddedData));
free(h_ResultCPU[0]);
free(h_ResultCPU[1]);
free(h_ResultGPU);
free(h_Window);
free(h_Data);
free(h_Kernel);
return bRetVal;
}
// Compute the frequency-domain representation of an already-built 2D
// diffusion kernel: zero-pad d_Kernel into an fftH x fftW buffer and FFT it
// into d_KernelSpectrum.
//
// d_KernelSpectrum : [out] device buffer receiving the kernel spectrum
// d_Kernel         : [in]  device buffer holding the kernelH x kernelW kernel
// kernel_cctx      : kernel-build context (unused here; kept for interface
//                    compatibility with existing callers)
// chem_cctx        : chemical context supplying kernel and FFT dimensions
//
// Returns true; CUDA/cuFFT errors abort via checkCudaErrors.
bool computeKernelSpectrum(
    fComplex *d_KernelSpectrum,
    float *d_Kernel,
    c_ctx kernel_cctx,
    c_ctx chem_cctx
)
{
    float *d_PaddedKernel;
    // A single C2C plan replaces separate R2C/C2R plans: the fftH x fftW real
    // buffer is reinterpreted as fftH x (fftW/2) packed complex values.
    cufftHandle fftPlan;
    bool bRetVal = true;
#ifdef PRINT_KERNEL
    printf("Testing kernel spectrum computation\n");
    fprintf(stderr, "Testing kernel spectrum computation\n");
#endif
    const int kernelH = chem_cctx.KH;
    const int kernelW = chem_cctx.KW;
    const int kernelY = chem_cctx.KY;
    const int kernelX = chem_cctx.KX;
    const int fftH = chem_cctx.FFTH;
    const int fftW = chem_cctx.FFTW;
#ifdef PRINT_KERNEL
    printf("\tkernelH: %d\tkernelW: %d\n", kernelH, kernelW);
    printf("\tkernelX: %d\tkernelY: %d\n", kernelX, kernelY);
    printf("\tfftH: %d\tfftW: %d\n", fftH, fftW);
    fprintf(stderr,"...allocating memory\n");
#endif
    checkCudaErrors(cudaMalloc((void **)&d_PaddedKernel, fftH * fftW * sizeof(float)));
    // Fixed: removed an unused kernelH*kernelW scratch allocation
    // (d_UnpaddedKernel) that was never read or written.
#ifdef PRINT_KERNEL
    printf("...creating C2C FFT plan for %i x %i\n", fftH, fftW / 2);
#endif
    checkCudaErrors(cufftPlan2d(&fftPlan, fftH, fftW / 2, CUFFT_C2C));
#ifdef PRINT_KERNEL
    fprintf(stderr,"...uploading to GPU and padding convolution kernel and input data\n");
#endif
    checkCudaErrors(cudaMemset(d_PaddedKernel, 0, fftH * fftW * sizeof(float)));
    padKernel(
        d_PaddedKernel,
        d_Kernel,
        fftH,
        fftW,
        kernelH,
        kernelW,
        kernelY,
        kernelX
    );
    checkCudaErrors(cudaDeviceSynchronize());
    //CUFFT_INVERSE works just as well...
    const int FFT_DIR = CUFFT_FORWARD;
#ifdef PRINT_KERNEL
    printf("...transforming convolution kernel\n");
#endif
    // d_KernelSpectrum = FFT(d_PaddedKernel), with the real padded kernel
    // viewed as packed complex data.
    checkCudaErrors(cufftExecC2C(fftPlan, (cufftComplex *)d_PaddedKernel, (cufftComplex *)d_KernelSpectrum, FFT_DIR));
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cufftDestroy(fftPlan));
    checkCudaErrors(cudaFree(d_PaddedKernel));
    return bRetVal;
}
// Diffuse a 2D chemical field in place via FFT convolution with a
// precomputed kernel spectrum.
//
// d_Data            : [in/out] device buffer (dataH x dataW), diffused in place
// d_KernelSpectrum0 : [in] precomputed kernel spectrum (packed C2C layout,
//                     fftH x fftW/2 — see computeKernelSpectrum)
// cctx              : context supplying kernel/data/FFT dimensions
// epiBoundary       : boundary index forwarded to the padding routine
// baseChem          : constant fill value used by the pConstantVF padding mode
//
// Returns true; CUDA/cuFFT errors abort via checkCudaErrors.
bool fftDiffuse2D(
    float *d_Data,
    fComplex *d_KernelSpectrum0,
    c_ctx cctx,
    int epiBoundary,
    float baseChem)
{
    float *d_PaddedData;
    fComplex *d_DataSpectrum0;
    cufftHandle fftPlan;
    bool bRetVal = true;
#ifdef PRINT_KERNEL
    printf("Testing GPU chemical diffusion computation\n");
    fprintf(stderr,"Testing GPU chemical diffusion computation\n");
#endif
    const int kernelH = cctx.KH;
    const int kernelW = cctx.KW;
    const int kernelY = cctx.KY;
    const int kernelX = cctx.KX;
    const int dataH = cctx.DH;
    const int dataW = cctx.DW;
    const int fftH = cctx.FFTH;
    const int fftW = cctx.FFTW;
#ifdef PRINT_KERNEL
    printf("\tkernelH: %d\tkernelW: %d\n", kernelH, kernelW);
    printf("\tkernelX: %d\tkernelY: %d\n", kernelX, kernelY);
    // Fixed: second argument was dataH twice, so dataW was never printed.
    printf("\tdataH: %d\tdataW: %d\n", dataH, dataW);
    printf("\tfftH: %d\tfftW: %d\n", fftH, fftW);
    fprintf(stderr,"...allocating memory\n");
#endif
    checkCudaErrors(cudaMalloc((void **)&d_PaddedData, fftH * fftW * sizeof(float)));
    // Packed C2C layout: fftH x (fftW/2) complex values alias the real buffer.
    checkCudaErrors(cudaMalloc((void **)&d_DataSpectrum0, fftH * (fftW / 2) * sizeof(fComplex)));
#ifdef PRINT_KERNEL
    printf("...creating C2C FFT plan for %i x %i\n", fftH, fftW / 2);
#endif
    checkCudaErrors(cufftPlan2d(&fftPlan, fftH, fftW / 2, CUFFT_C2C));
#ifdef PRINT_KERNEL
    fprintf(stderr,"...uploading to GPU and padding input data\n");
#endif
    checkCudaErrors(cudaMemset(d_PaddedData, 0, fftH * fftW * sizeof(float)));
    padData2D(
        pConstantVF, // pRightWall, //pMirror, //pClampToBorder,
        d_PaddedData,
        d_Data,
        fftH,
        fftW,
        dataH,
        dataW,
        kernelH,
        kernelW,
        kernelY,
        kernelX,
        epiBoundary,
        baseChem
    );
#ifdef PRINT_KERNEL
    fprintf(stderr,"...performing convolution\n");
#endif
    //CUFFT_INVERSE works just as well...
    const int FFT_DIR = CUFFT_FORWARD;
    checkCudaErrors(cudaDeviceSynchronize());
    // --------- Computing convolution ------------ begin
    // d_DataSpectrum = FFT(d_PaddedData)
    checkCudaErrors(cufftExecC2C(fftPlan, (cufftComplex *)d_PaddedData, (cufftComplex *)d_DataSpectrum0, FFT_DIR));
#ifdef PRINT_KERNEL
    printf( "fftH: %d\tfftW: %d\n", fftH, fftW);
#endif
    // d_DataSpectrum = d_DataSpectrum * d_KernelSpectrum
    spProcess2D(d_DataSpectrum0, d_DataSpectrum0, d_KernelSpectrum0, fftH, fftW / 2, FFT_DIR);
    // d_PaddedData = IFFT(d_DataSpectrum) <------- Output
    // (-FFT_DIR flips CUFFT_FORWARD (-1) into CUFFT_INVERSE (+1))
    checkCudaErrors(cufftExecC2C(fftPlan, (cufftComplex *)d_DataSpectrum0, (cufftComplex *)d_PaddedData, -FFT_DIR));
    // --------- Computing convolution ------------ end
    checkCudaErrors(cudaDeviceSynchronize());
#ifdef PRINT_KERNEL
    fprintf(stderr,"...removing results padding\n");
#endif
    unpadResult(
        d_Data,
        d_PaddedData,
        dataH,
        dataW,
        fftH,
        fftW
    );
#ifdef PRINT_KERNEL
    fprintf(stderr,"...reading back GPU convolution results\n");
#endif
    checkCudaErrors(cufftDestroy(fftPlan));
#ifdef PRINT_KERNEL
    printf("...freeing device pointers\n");
#endif
    checkCudaErrors(cudaFree(d_DataSpectrum0));
    checkCudaErrors(cudaFree(d_PaddedData));
#ifdef PRINT_KERNEL
    printf("...returning to main()\n");
#endif
    return bRetVal;
}
#else // MODEL_3D
// Sleep for approximately `usec` microseconds, retrying after signal
// interruptions (EINTR) with the remaining time.
//
// Fixes vs. the original:
//  - tv_nsec was computed as (usec % 1000000) * 1000000000, which is both an
//    int overflow and out of nanosleep's valid [0, 1e9) range (EINVAL ⇒ no
//    sleep at all for sub-second requests). Microseconds→nanoseconds is *1000.
//  - the EINTR branch fell through to an unconditional break, so the retry
//    never happened; it now continues the loop with the remaining time.
void my_sleep(unsigned usec) {
    struct timespec req, rem;
    req.tv_sec  = usec / 1000000;
    req.tv_nsec = (long)(usec % 1000000) * 1000L;
    while ((req.tv_sec != 0) || (req.tv_nsec != 0)) {
        if (nanosleep(&req, &rem) == 0)
            break;              // slept the full interval
        if (errno == EINTR) {
            req = rem;          // interrupted: retry with the remaining time
            continue;
        }
        // Unhandleable error (EFAULT, EINVAL, or ENOSYS): give up.
        break;
    }
}
// Query the current device's memory usage and print used / free / total in GB.
void reportMemUsageGPU()
{
    size_t freeBytes = 0;
    size_t totalBytes = 0;
    checkCudaErrors(cudaMemGetInfo(&freeBytes, &totalBytes));
    const double gib = 1024.0 * 1024.0 * 1024.0;
    const double totalGB = (double)totalBytes / gib;
    const double freeGB  = (double)freeBytes / gib;
    printf("GPU memory usage: used = %f GB, free = %f GB, total = %f GB\n\n",
           totalGB - freeGB, freeGB, totalGB);
}
// Build the 3D diffusion kernel and its spectrum for every chemical species.
// For each chemical: select its device, create R2C/C2R 3D FFT plans, run
// computeKernel3D (time-domain kernel build) and computeKernelSpectrum3D
// (spectrum + host copy), then destroy the plans.
//
// kernelRadius : per-pass self-convolution count forwarded to computeKernel3D
// lambda/gamma : per-chemical diffusion weight and decay constant arrays
// dt           : time step
// kern_cctx    : kernel-build context (FFT volume sizes, scratch buffers)
// chem_cctx    : chemical context (device ids, per-chemical buffers)
//
// Returns true only if every per-chemical kernel build reported success.
bool computeKernel3DBatch(
    int kernelRadius,
    float *lambda,
    float *gamma,
    float dt,
    c_ctx kern_cctx,
    c_ctx chem_cctx
)
{
    int *gpu_id = chem_cctx.gpu_id;
    cufftHandle
        fftPlanFwd,
        fftPlanInv;
    const int fftD = kern_cctx.FFTD;
    const int fftH = kern_cctx.FFTH;
    const int fftW = kern_cctx.FFTW;
    // Fixed: bRetVal was uninitialized, so with zero chemicals the function
    // returned an indeterminate value; it also kept only the last iteration's
    // result. It now starts true and accumulates success across all chems.
    bool bRetVal = true;
    StopWatchInterface *hTimer = NULL;
    sdkCreateTimer(&hTimer);
    double kernelSpectrumComputationTime = 0.0;
    // Compute all kernels
    for (int ic = 0; ic < N_CHEM; ic++){
        int ig = gpu_id[ic];
        checkCudaErrors(cudaSetDevice(chem_cctx.dev_id[ic]));//ig));
        // Create FFT plans
        // TODO: Make plans reusable across chemicals
        printf("...creating R2C & C2R 3D FFT plans for %i x %i x %i\n", fftD, fftH, fftW);
        printf("\tchem %d on device %d\n", ic, ig);
        checkCudaErrors(cufftPlan3d(&fftPlanFwd, fftD, fftH, fftW, CUFFT_R2C));
        checkCudaErrors(cufftPlan3d(&fftPlanInv, fftD, fftH, fftW, CUFFT_C2R));
        /********************************************
        * Kernel Computation *
        ********************************************/
        bool ok = computeKernel3D(
            kernelRadius,
            lambda[ic],
            gamma[ic],
            dt,
            kern_cctx,
            fftPlanFwd,
            fftPlanInv,
            ic
        );
        bRetVal = bRetVal && ok;
        sdkResetTimer(&hTimer);
        sdkStartTimer(&hTimer);
        /********************************************
        * Kernel Spectrum Computation *
        ********************************************/
        computeKernelSpectrum3D(
            kern_cctx,
            chem_cctx,
            ic);
        sdkStopTimer(&hTimer);
        kernelSpectrumComputationTime += sdkGetTimerValue(&hTimer);
        // Destroy per-chemical FFT plans
        checkCudaErrors(cufftDestroy(fftPlanInv));
        checkCudaErrors(cufftDestroy(fftPlanFwd));
    }
    printf("\tTotal ker spect computation: %f MPix/s (%f ms)\n",
           (double)chem_cctx.DD * (double)chem_cctx.DH * (double) chem_cctx.DW * 1e-6 /
           (kernelSpectrumComputationTime * 0.001),
           kernelSpectrumComputationTime);
    // Fixed: the timer was created but never deleted.
    sdkDeleteTimer(&hTimer);
    printf("returning from compute kernel batch to main()\n");
    return bRetVal;
}
// Build the effective 3D diffusion kernel for chemical species `ic`.
//
// A 3x3x3 explicit diffusion/decay stencil (six neighbor taps = lambda,
// center = 1 - 6*lambda - gamma*dt) is placed at the center of a
// dataD x dataH x dataW volume and repeatedly self-convolved via FFT:
// `niter` outer passes, each raising the stencil spectrum with complexPower
// and applying one modulate-and-normalize. The windowD x windowH x windowW
// center of the result is extracted into cctx.d_data[ic] / cctx.h_data[ic];
// the final spectrum is left in cctx.d_kernelspectrum_h[ic].
//
// kernelRadius   : number of stencil self-convolutions per pass
// lambda         : diffusion weight of the stencil
// gamma          : decay constant (applied as gamma*dt at the center tap)
// dt             : time step
// cctx           : context holding sizes and per-chemical buffers
// fftPlanFwd/Inv : caller-owned 3D R2C / C2R cuFFT plans for the FFT volume
// ic             : chemical index selecting buffers inside cctx
//
// Returns true; CUDA/cuFFT errors abort via checkCudaErrors.
bool computeKernel3D(
    int kernelRadius,
    float lambda,
    float gamma, // decay constant
    float dt,
    c_ctx cctx,
    cufftHandle fftPlanFwd,
    cufftHandle fftPlanInv,
    short int ic)
{
#ifdef COMPUTE_COVERAGE
    int
        cpu_input = 0,
        cpu_output = 1;
#endif // COMPUTE_COVERAGE
    float *h_Window = cctx.h_data[ic];
    float *d_Window = cctx.d_data[ic];
    fComplex *d_DataSpectrum = cctx.d_kernelspectrum_h[ic]; // Kernel result spectrum
    float
        *h_Data,
        *h_Kernel,
        *h_ResultGPU;
    // NOTE(review): h_ResultGPU is never allocated in this function; it (and
    // the commented-out h_ResultCPU) are only referenced inside
    // COMPUTE_COVERAGE, which cannot work as-is — confirm before enabling.
    // *h_ResultCPU[2];
    float
        *d_Data,
        *d_PaddedData,
        *d_Kernel,
        *d_PaddedKernel;
    fComplex
        *d_KernelSpectrum;
    bool bRetVal = true;
    StopWatchInterface *hTimer = NULL;
    sdkCreateTimer(&hTimer);
#ifdef PRINT_KERNEL
    printf("Testing kernel computation\n");
    printf("\tBuilding filter kernel\n");
#endif // PRINT_KERNEL
    const int niter = cctx.niter;
    const int kernelD = cctx.KD;
    const int kernelH = cctx.KH;//7;
    const int kernelW = cctx.KW;//6;
    const int kernelZ = cctx.KZ;
    const int kernelY = cctx.KY;//3;
    const int kernelX = cctx.KX;//4;
    const int dataD = cctx.DD;
    const int dataH = cctx.DH;//100;//1160;//2000;
    const int dataW = cctx.DW;//100;//1660;//2000;
    const int outKernelD = cctx.DD;
    const int outKernelH = cctx.DH;
    const int outKernelW = cctx.DW;
    const int fftD = cctx.FFTD;
    const int fftH = cctx.FFTH;
    const int fftW = cctx.FFTW;
    const int windowD = cctx.windowD;
    const int windowH = cctx.windowH;
    const int windowW = cctx.windowW;
    int ksize = kernelD * kernelH * kernelW;
    int dsize = dataD * dataH * dataW;
    int fsize = fftD * fftH * fftW;
    int wsize = windowD * windowH * windowW;
    int ksize_b = ksize * sizeof(float);
    int dsize_b = dsize * sizeof(float);
    int fsize_b = fsize * sizeof(float);
    int wsize_b = wsize * sizeof(float);
#ifdef PRINT_KERNEL
    printf("...allocating memory\n");
#endif // PRINT_KERNEL
    h_Data = (float *)malloc(dsize_b);
    h_Kernel = (float *)malloc(ksize_b);
    checkCudaErrors(cudaMalloc((void **)&d_Data, dsize_b));
    checkCudaErrors(cudaMalloc((void **)&d_Kernel, ksize_b));
    checkCudaErrors(cudaMalloc((void **)&d_PaddedData, fsize_b));
    checkCudaErrors(cudaMalloc((void **)&d_PaddedKernel, fsize_b));
    printf("k: %p\n", d_PaddedKernel);
    checkCudaErrors(cudaMalloc((void **)&d_KernelSpectrum, fftD * fftH * (fftW / 2 + 1) * sizeof(fComplex)));
    /********************************************
    * Initial kernel initialization *
    ********************************************/
    printf("...generating 3D %d x %d x %d kernel coefficients\n", kernelD, kernelH, kernelW);
    for (int i = 0; i < kernelD * kernelH * kernelW; i++)
    {
        h_Kernel[i] = 0;
    }
    // 7-point stencil: six lambda-weighted face neighbors plus the center tap.
    int hStride = kernelW;
    int dStride = kernelH * kernelW;
    h_Kernel[0 * dStride + 1 * hStride + 1] = lambda;
    h_Kernel[2 * dStride + 1 * hStride + 1] = lambda;
    h_Kernel[1 * dStride + 0 * hStride + 1] = lambda;
    h_Kernel[1 * dStride + 2 * hStride + 1] = lambda;
    h_Kernel[1 * dStride + 1 * hStride + 0] = lambda;
    h_Kernel[1 * dStride + 1 * hStride + 2] = lambda;
    h_Kernel[1 * dStride + 1 * hStride + 1] = 1 - 6*lambda - gamma*dt;
    for (int i = 0; i < dataD * dataH * dataW; i++)
    {
        h_Data[i] = 0;
    }
    // Copy kernel data to middle block of the input
    int start_k = outKernelD/2 - kernelD/2;
    int end_k = outKernelD/2 + kernelD/2 + 1;
    int start_i = outKernelH/2 - kernelH/2;
    int end_i = outKernelH/2 + kernelH/2 + 1;
    int start_j = outKernelW/2 - kernelW/2;
    int end_j = outKernelW/2 + kernelW/2 + 1;
    int kk = 0, ki = 0, kj = 0;
    int strideD = dataH * dataW;
    int strideH = dataW;
    int kstrideD = kernelH * kernelW;
    int kstrideH = kernelW;
    for (int k = start_k; k < end_k; k++) {
        for (int i = start_i; i < end_i; i++) {
            for (int j = start_j; j < end_j; j++) {
                h_Data [k * strideD + i * strideH + j] = h_Kernel[kk * kstrideD + ki * kstrideH + kj];
                // printf("%d,%d,%d -> %d,%d,%d\n", kk, ki, kj, k, i, j);
                kj++;
            }
            ki++;
            kj = 0;
        }
        kk++;
        ki = 0;
    }
#ifdef CALC_MEM_RQ
    const size_t numGPUs = 1;
    size_t workSizeFwd[numGPUs];
    size_t workSizeInv[numGPUs];
    cufftGetSize3d(fftPlanFwd, fftD, fftH, fftW, CUFFT_R2C, workSizeFwd);
    cufftGetSize3d(fftPlanInv, fftD, fftH, fftW, CUFFT_C2R, workSizeInv);
    printf("Compute kernel forward size %d x %d x %d requires %d bytes\n", fftW, fftH, fftD, workSizeFwd[0]);
    printf("Compute kernel bckward size %d x %d x %d requires %d bytes\n", fftW, fftH, fftD, workSizeInv[0]);
    //return true;
#endif
    printf("...uploading to GPU and padding convolution kernel and input data\n");
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    checkCudaErrors(cudaMemcpy(d_Kernel, h_Kernel, ksize_b, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_Data, h_Data, dsize_b, cudaMemcpyHostToDevice));
    sdkStopTimer(&hTimer);
    double dataTransferTime = sdkGetTimerValue(&hTimer);
    sdkResetTimer(&hTimer);
    sdkStartTimer(&hTimer);
    checkCudaErrors(cudaMemset(d_PaddedKernel, 0, fsize_b));
    checkCudaErrors(cudaMemset(d_PaddedData, 0, fsize_b));
    double memsetPaddingTime = 0, buildKernelTimeTotalGPU = 0, buildKernelTimeTotalCPU = 0;
    /********************************************
    * Kernel Computation *
    ********************************************/
    for (int filter_i = 0; filter_i < niter; filter_i++)
    {
        padKernel3D(
            d_PaddedKernel,
            d_Kernel,
            fftD,
            fftH,
            fftW,
            kernelD,
            kernelH,
            kernelW,
            kernelZ,
            kernelY,
            kernelX
        );
        checkCudaErrors(cudaDeviceSynchronize());
        padDataClampToBorder3D(
            d_PaddedData,
            d_Data,
            fftD,
            fftH,
            fftW,
            dataD,
            dataH,
            dataW,
            kernelD,
            kernelH,
            kernelW,
            kernelZ,
            kernelY,
            kernelX
        );
        sdkStopTimer(&hTimer);
        memsetPaddingTime += sdkGetTimerValue(&hTimer);
        //Not including kernel transformation into time measurement,
        //since convolution kernel is not changed very frequently
#ifdef PRINT_KERNEL
        printf("...transforming convolution kernel k: %p\tkspec: %p\n",
               d_PaddedKernel, d_KernelSpectrum);
#endif // PRINT_KERNEL
        buildKernelTimeTotalGPU = 0;
        buildKernelTimeTotalCPU = 0;
        checkCudaErrors(cudaDeviceSynchronize());
        // d_KernelSpectrum = FFT(d_PaddedKernel)
        // Fixed: this transform used to be issued twice per iteration (once
        // here, once again inside the timed convolution section); the
        // redundant second call was removed.
        checkCudaErrors(cufftExecR2C(fftPlanFwd, (cufftReal *)d_PaddedKernel, (cufftComplex *)d_KernelSpectrum));
        sdkResetTimer(&hTimer);
        sdkStartTimer(&hTimer);
        checkCudaErrors(cudaDeviceSynchronize());
#ifdef PRINT_KERNEL
        printf("HERE------- %p\t%p\n", d_PaddedData, d_DataSpectrum);
#endif // PRINT_KERNEL
        /********************************************
        * Convolution *
        ********************************************/
        checkCudaErrors(cufftExecR2C(fftPlanFwd, (cufftReal *)d_PaddedData, (cufftComplex *)d_DataSpectrum));
        // Modulate WITHOUT scaling
        int isFirstIter = (filter_i == 0)? 1 : 0;
        complexPower(
            d_KernelSpectrum,
            fftD,
            fftH,
            fftW,
            1,
            kernelRadius - isFirstIter - 1
        );
        // Last iteration: Modulate AND scale
        modulateAndNormalize3D(d_DataSpectrum, d_KernelSpectrum, fftD, fftH, fftW, 1);
        checkCudaErrors(cufftExecC2R(fftPlanInv, (cufftComplex *)d_DataSpectrum, (cufftReal *)d_PaddedData));
        checkCudaErrors(cudaDeviceSynchronize());
#ifdef PRINT_KERNEL
        sdkStopTimer(&hTimer);
        double gpuTime = sdkGetTimerValue(&hTimer);
        printf("\t\tGPU computation: %f MPix/s (%f ms)\n",
               (double)dataD * (double)dataH * (double)dataW * 1e-6 / (gpuTime * 0.001), gpuTime);
#endif // PRINT_KERNEL
        sdkResetTimer(&hTimer);
        sdkStartTimer(&hTimer);
        unpadResult3D(
            d_Data,
            d_PaddedData,
            dataD,
            dataH,
            dataW,
            fftD,
            fftH,
            fftW
        );
#ifdef PRINT_KERNEL
        sdkStopTimer(&hTimer);
        double unpadTime = sdkGetTimerValue(&hTimer);
        printf("\t\tunpad results: %f MPix/s (%f ms)\n",
               (double)dataD * (double)dataH * (double)dataW * 1e-6 / (unpadTime * 0.001), unpadTime);
        printf("...reading back GPU convolution results\n");
#endif // PRINT_KERNEL
    }
    /********************************************************
    * Kernel center extraction - FINAL time domain results *
    ********************************************************/
    printf("...extract kernel window from center\n");
    extractCenter3D(
        d_Window,
        d_Data,
        dataD,
        dataH,
        dataW,
        windowD,
        windowH,
        windowW
    );
    printf("...reading back kernel center from GPU\n");
    checkCudaErrors(cudaMemcpy(h_Window, d_Window, wsize_b, cudaMemcpyDeviceToHost));
#ifdef TEST_KERNEL
    for (int k = 0; k < kernelD; k++) {
        for (int i = 0; i < kernelH; i++) {
            for (int j = 0; j < kernelW; j++) {
                // Fixed: depth stride is kernelH*kernelW (was kernelH*kernelD).
                printf(", %f", h_Kernel[k*kernelH*kernelW + i*kernelW + j]);
            }
            printf("\n");
        }
    }
    int testW = 7;
    int xbegin = windowW/2 - testW/2;
    int xend = windowW - xbegin;
    int ybegin = windowH/2 - testW/2;
    int yend = windowH - ybegin;
    int zbegin = windowD/2 - testW/2;
    int zend = windowD - zbegin;
    for (int z = zbegin; z < zend; z++)
    for (int y = ybegin; y < yend; y++)
    for (int x = xbegin; x < xend; x++)
    {
        double rGPU = (double)h_Window[z * windowH * windowW +
                                       y * windowW + x];
        printf("\t[%d,%d,%d] %.*f\n", x, y, z,
               OP_DBL_Digs + 6, rGPU);
    }
    // Check if filter weights add up to one
    double ksum = 0.0;
    for (int z = 0; z < windowD; z++)
    for (int y = 0; y < windowH; y++)
    for (int x = 0; x < windowW; x++)
    {
        double rGPU = (double)h_Window[z * windowH * windowW + y * windowW + x];
        ksum += rGPU;
    }
    cout << "Kernel sum: " << ksum << endl;
    double diff = fabs(ksum - 1.0);
    double precision = 0.001;
    if (diff > precision){
        cout << "Error in kernel computation: Incorrect mass " << diff << " > " << precision << endl;
        exit(-2);
    }
    // print out kernel.vtk file
    util::outputDiffusionKernel(h_Window, windowW, windowH, windowD, "output/kernel.vtk");
#endif // TEST_KERNEL
    printf("\tData transfer: %f MPix/s (%f ms)\n",
           (double)dataD * (double)dataH * (double)dataW * 1e-6 / (dataTransferTime * 0.001), dataTransferTime);
    printf("\tMemset and padding: %f MPix/s (%f ms)\n",
           (double)dataD * (double)dataH * (double)dataW * 1e-6 / (memsetPaddingTime * 0.001), memsetPaddingTime);
    printf("\tTotal GPU time: %f MPix/s (%f ms)\n",
           (double)dataD * (double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalGPU * 0.001), buildKernelTimeTotalGPU);
    printf("\tTotal CPU time: %f MPix/s (%f ms)\n",
           (double)dataD * (double)dataH * (double)dataW * 1e-6 / (buildKernelTimeTotalCPU * 0.001), buildKernelTimeTotalCPU);
#ifdef COMPUTE_COVERAGE
    printf("...comparing the results: ");
    // Update indices for CPU input/output
    cpu_input = (cpu_input + 1) % 2;
    cpu_output = (cpu_output + 1) % 2;
    printf("Results from GPU:\n");
    displayWindowPlane(h_ResultGPU, dataW, dataH, 15, 10);
    printf("Results from CPU:\n");
    displayWindowPlane(h_ResultCPU[cpu_output], dataW, dataH, 15, 10);
    printf("...computing coverage\n");
    double sum_window = 0.0; // fixed: accumulator was uninitialized
    for (int z = 0; z < windowD; z++)
    for (int y = 0; y < windowH; y++)
    for (int x = 0; x < windowW; x++)
    {
        double rGPU = (double)h_Window[z * windowH * windowW +
                                       y * windowW + x];
        sum_window += rGPU;
    }
    int hlfW = windowW/2;
    int hlfH = windowH/2;
    int hlfD = windowD/2;
    // NOTE(review): `sum` below is not defined in this function; presumably a
    // file-scope total — confirm before enabling COMPUTE_COVERAGE.
    // Fixed: format used invalid escape "\%"; a literal percent sign is "%%".
    printf("\tcoverage:\t\t%lf/%lf\t%lf%%\n", sum_window, sum, (sum_window/sum)*100.0);
    printf("\tzero threshold:\tx:[%.*f, %.*f]\n\t\t\ty:[%.*f, %.*f]\n\t\t\tz:[%.*f, %.*f]\n",
           OP_DBL_Digs, h_Window[hlfD * windowH * windowW + hlfH * windowW + 0],
           OP_DBL_Digs, h_Window[hlfD * windowH * windowW + hlfH * windowW + (windowW - 1)],
           OP_DBL_Digs, h_Window[hlfD * windowH * windowW + 0 * windowW + hlfW],
           OP_DBL_Digs, h_Window[hlfD * windowH * windowW + (windowH - 1) * windowW + hlfW],
           OP_DBL_Digs, h_Window[0 * windowH * windowW + hlfH * windowW + hlfW],
           OP_DBL_Digs, h_Window[(windowD - 1) * windowH * windowW + hlfH * windowW + hlfW]);
#endif // COMPUTE_COVERAGE
    printf("...shutting down\n");
    sdkDeleteTimer(&hTimer);
    /********************************************
    * Pointer deallocations *
    ********************************************/
    checkCudaErrors(cudaFree(d_Data));
    checkCudaErrors(cudaFree(d_Kernel));
    checkCudaErrors(cudaFree(d_PaddedData));
    checkCudaErrors(cudaFree(d_PaddedKernel));
    // Free after all chems have used this
    checkCudaErrors(cudaFree(d_KernelSpectrum));
    free(h_Data);
    free(h_Kernel); // fixed: h_Kernel was previously leaked
    return bRetVal;
}
// Pad the time-domain kernel of chemical `ic` into the FFT volume, run a 3D
// R2C transform, and store the resulting spectrum both on the device
// (chem_cctx.d_kernelspectrum_h[ic]) and permanently on the host
// (chem_cctx.h_kernelspectrum[ic]).
bool computeKernelSpectrum3D(
    c_ctx kernel_cctx,
    c_ctx chem_cctx,
    short int ic)
{
    float *d_Kernel = kernel_cctx.d_data[ic];
    fComplex *d_KernelSpectrum = chem_cctx.d_kernelspectrum_h[ic];
    fComplex *h_KernelSpectrum = chem_cctx.h_kernelspectrum[ic]; // permanent host copy of the padded spectrum
    float *paddedKernel;
    cufftHandle fwdPlan;
    printf("Testing kernel spectrum computation\n");
    const int kD = chem_cctx.KD;
    const int kH = chem_cctx.KH;
    const int kW = chem_cctx.KW;
    const int kZ = chem_cctx.KZ;
    const int kY = chem_cctx.KY;
    const int kX = chem_cctx.KX;
    const int fD = chem_cctx.FFTD;
    const int fH = chem_cctx.FFTH;
    const int fW = chem_cctx.FFTW;
    size_t paddedBytes = fD * fH * fW * sizeof(float);
    size_t spectrumBytes = fD * fH * (fW / 2 + 1) * sizeof(fComplex);
    printf("\tkernelD: %d\tkernelH: %d\tkernelW: %d\n", kD, kH, kW);
    printf("\tkernelX: %d\tkernelY: %d\tkernelZ: %d\n", kX, kY, kZ);
    printf("\tfftD: %d\tfftH: %d\tfftW: %d\n", fD, fH, fW);
    printf("...allocating memory\n");
    checkCudaErrors(cudaMalloc((void **)&paddedKernel, paddedBytes));
    printf("...creating R2C FFT plans for %i x %i x %i\n", fD, fH, fW);
    checkCudaErrors(cufftPlan3d(&fwdPlan, fD, fH, fW, CUFFT_R2C));
    printf("...uploading to GPU and padding convolution kernel and input data\n");
    checkCudaErrors(cudaMemset(paddedKernel, 0, paddedBytes));
    padKernel3D(
        paddedKernel,
        d_Kernel,
        fD, fH, fW,
        kD, kH, kW,
        kZ, kY, kX
    );
    checkCudaErrors(cudaDeviceSynchronize());
    printf("...transforming convolution kernel\n");
    // d_KernelSpectrum = FFT(paddedKernel)
    checkCudaErrors(cufftExecR2C(fwdPlan, (cufftReal *)paddedKernel, (cufftComplex *)d_KernelSpectrum));
    checkCudaErrors(cudaDeviceSynchronize());
    // Persist the spectrum on the host for later reuse.
    checkCudaErrors(cudaMemcpy(h_KernelSpectrum, d_KernelSpectrum, spectrumBytes, cudaMemcpyDeviceToHost));
    checkCudaErrors(cufftDestroy(fwdPlan));
    checkCudaErrors(cudaFree(paddedKernel));
    return true;
}
/*
 * Perform one 3-D diffusion step on d_Data via FFT-based convolution:
 *   d_Data <- unpad( IFFT( FFT(pad(d_Data)) .* d_KernelSpectrum ) )
 *
 * Parameters:
 *   d_Data           - device buffer (dataD x dataH x dataW floats); read,
 *                      then overwritten with the diffused result.
 *   d_KernelSpectrum - precomputed kernel FFT in R2C layout
 *                      (fftD x fftH x (fftW/2+1) fComplex values).
 *   fftPlanFwd       - caller-owned cuFFT R2C plan for the padded size.
 *   fftPlanInv       - caller-owned cuFFT C2R plan for the padded size.
 *   cctx             - geometry context carrying kernel (K*), data (D*) and
 *                      padded FFT (FFT*) extents.
 *   epiBoundary      - boundary index forwarded to the constant-value
 *                      padding kernel (padDataConstantVF3D).
 *   baseChem         - baseline chemical concentration used to fill padding.
 *
 * Returns bRetVal (always true here); CUDA/cuFFT failures abort inside
 * checkCudaErrors rather than returning false.
 */
bool fftDiffuse3D(
float *d_Data,
fComplex *d_KernelSpectrum,
cufftHandle fftPlanFwd,
cufftHandle fftPlanInv,
c_ctx cctx,
int epiBoundary,
float baseChem)
{
int devID;
StopWatchInterface *hTimer = NULL;
sdkCreateTimer(&hTimer);
cudaGetDevice(&devID);
float
*d_PaddedData;      // zero/constant-padded copy of d_Data, FFT-sized
fComplex
*d_DataSpectrum;    // forward FFT of the padded data
/* cufftHandle
fftPlanFwd,
fftPlanInv;
*/
bool bRetVal = true;
#ifdef PRINT_KERNEL
printf("Testing GPU chemical diffusion computation\n");
#endif // PRINT_KERNEL
// Extents from the context: kernel size (D/H/W), kernel center (Z/Y/X),
// raw data size and the padded FFT size.
const int kernelD = cctx.KD;
const int kernelH = cctx.KH;
const int kernelW = cctx.KW;
const int kernelZ = cctx.KZ;
const int kernelY = cctx.KY;
const int kernelX = cctx.KX;
const int dataD = cctx.DD;
const int dataH = cctx.DH;
const int dataW = cctx.DW;
const int fftD = cctx.FFTD;
const int fftH = cctx.FFTH;
const int fftW = cctx.FFTW;
// Element counts and byte sizes.
// NOTE(review): these are plain ints and can overflow for very large
// volumes (> 2^31 elements) -- confirm extents stay small enough.
int ksize = kernelD * kernelH * kernelW;
int dsize = dataD * dataH * dataW;
int fsize = fftD * fftH * fftW;
int ksize_b = ksize * sizeof(float);  // currently unused here
int dsize_b = dsize * sizeof(float);  // currently unused here
int fsize_b = fsize * sizeof(float);
#ifdef PRINT_KERNEL
printf("\tkernelD: %d\tkernelH: %d\tkernelW: %d\n", kernelD, kernelH, kernelW);
printf("\tkernelX: %d\tkernelY: %d\tkernelZ: %d\n", kernelX, kernelY, kernelZ);
printf("\tdataD: %d\tdataH: %d\tdataW: %d\n", dataD, dataH, dataW);
printf("\tfftD: %d\tfftH: %d\tfftW: %d\n", fftD, fftH, fftW);
printf("...allocating memory ------\n");
#endif // PRINT_KERNEL
// R2C output only needs fftW/2+1 complex values along the innermost axis.
checkCudaErrors(cudaMalloc((void **)&d_PaddedData, fsize_b));
checkCudaErrors(cudaMalloc((void **)&d_DataSpectrum, fftD * fftH * (fftW / 2 + 1) * sizeof(fComplex)));
#ifdef CALC_MEM_RQ
const size_t numGPUs = 1;
size_t workSizeFwd[numGPUs];
size_t workSizeInv[numGPUs];
checkCudaErrors(cufftEstimate3d(fftD, fftH, fftW, CUFFT_R2C, workSizeFwd));
checkCudaErrors(cufftEstimate3d(fftD, fftH, fftW, CUFFT_C2R, workSizeInv));
// BUGFIX: work sizes are size_t; %d is undefined behavior on LP64 -- use %zu.
printf("Compute chem forward size %d x %d x %d requires %zu bytes\n", fftW, fftH, fftD, workSizeFwd[0]);
printf("Compute chem bckward size %d x %d x %d requires %zu bytes\n", fftW, fftH, fftD, workSizeInv[0]);
//return true;
#endif
#ifdef PRINT_KERNEL
printf("...creating R2C & C2R FFT plans for %i x %i x %i\n", fftD, fftH, fftW);
#endif //PRINT_KERNEL
// Plans are created by the caller and passed in; kept for reference:
// checkCudaErrors(cufftPlan3d(&fftPlanFwd, fftD, fftH, fftW, CUFFT_R2C));
// checkCudaErrors(cufftPlan3d(&fftPlanInv, fftD, fftH, fftW, CUFFT_C2R));
#ifdef CALC_MEM_RQ
cufftGetSize3d(fftPlanFwd, fftD, fftH, fftW, CUFFT_R2C, workSizeFwd);
cufftGetSize3d(fftPlanInv, fftD, fftH, fftW, CUFFT_C2R, workSizeInv);
printf("Compute kernel forward size %d x %d x %d requires %f GB\n", fftW, fftH, fftD,
(float) workSizeFwd[0]/(1024.0*1024.0*1024.0));
printf("Compute kernel bckward size %d x %d x %d requires %f GB\n", fftW, fftH, fftD,
(float) workSizeInv[0]/(1024.0*1024.0*1024.0));
//return true;
#endif
firstTransferCompleted = true;
checkCudaErrors(cudaMemset(d_PaddedData, 0, fsize_b));
/********************************************
* Pad data *
********************************************/
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
// Clamp-to-border padding kept for reference; constant-value padding is used.
// padDataClampToBorder3D(
// d_PaddedData,
// d_Data,
// fftD,
// fftH,
// fftW,
// dataD,
// dataH,
// dataW,
// kernelD,
// kernelH,
// kernelW,
// kernelZ,
// kernelY,
// kernelX
// );
// DEBUG rat
padDataConstantVF3D(
d_PaddedData,
d_Data,
fftD,
fftH,
fftW,
dataD,
dataH,
dataW,
kernelD,
kernelH,
kernelW,
kernelZ,
kernelY,
kernelX,
epiBoundary,
baseChem
);
#ifdef PRINT_KERNEL
printf("...performing convolution\n");
#endif // PRINT_KERNEL
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
double padTime = sdkGetTimerValue(&hTimer);
// --------- Computing convolution ------------ begin
/********************************************
* Compute FFT{data} *
********************************************/
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
// d_DataSpectrum = FFT{d_PaddedData}
checkCudaErrors(cufftExecR2C(fftPlanFwd,
(cufftReal *)d_PaddedData, (cufftComplex *)d_DataSpectrum));
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
double fftTime = sdkGetTimerValue(&hTimer);
/********************************************
* Spectrum Point-wise Multiplication *
********************************************/
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
// d_DataSpectrum = d_DataSpectrum * d_KernelSpectrum (convolution theorem)
modulateAndNormalize3D(d_DataSpectrum, d_KernelSpectrum, fftD, fftH, fftW, 1);
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
double multTime = sdkGetTimerValue(&hTimer);
/********************************************
* Compute IFFT{data_spectrum} *
********************************************/
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
// d_PaddedData = IFFT{d_DataSpectrum}
checkCudaErrors(cufftExecC2R(fftPlanInv, (cufftComplex *)d_DataSpectrum, (cufftReal *)d_PaddedData));
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
double ifftTime = sdkGetTimerValue(&hTimer);
// --------- Computing convolution ------------ end
/********************************************
* Unpad results *
********************************************/
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
#ifdef PRINT_KERNEL
printf("...removing result padding\n");
#endif // PRINT_KERNEL
// Copy the valid dataD x dataH x dataW region back into d_Data.
unpadResult3D(
d_Data,
d_PaddedData,
dataD,
dataH,
dataW,
fftD,
fftH,
fftW
);
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
double unpadTime = sdkGetTimerValue(&hTimer);
/********************************************
* Execution Time Display *
********************************************/
printf("\t\t\t\t[%d] pad took: %f ms\n", devID, padTime);
printf("\t\t\t\t[%d] FFT took: %f ms\n", devID, fftTime);
printf("\t\t\t\t[%d] MULT took: %f ms\n", devID, multTime);
printf("\t\t\t\t[%d] IFFT took: %f ms\n", devID, ifftTime);
printf("\t\t\t\t[%d] unpad took: %f ms\n", devID, unpadTime);
/********************************************
* Deallocation of plans and memory *
********************************************/
sdkDeleteTimer(&hTimer);
// Plans belong to the caller; do not destroy them here.
// checkCudaErrors(cufftDestroy(fftPlanInv));
// checkCudaErrors(cufftDestroy(fftPlanFwd));
#ifdef PRINT_KERNEL
printf("...freeing device pointers\n");
#endif // PRINT_KERNEL
checkCudaErrors(cudaFree(d_PaddedData));
checkCudaErrors(cudaFree(d_DataSpectrum));
#ifdef PRINT_KERNEL
printf("...returning to main()\n");
#endif // PRINT_KERNEL
return bRetVal;
}
#endif // MODEL_3D
#endif // GPU_DIFFUSE (*)
|
7371989e4fd202ec69744e0a6b280a0caee48460.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* A NVBit tool, which will replay the program by examine the log file generated in record phase
* input is defined as memory copied from host to device and arguments of kernel
* output is defined as memory copied from device to host
*
* Yineng Yan (yinengy@umich.edu), 2020
*/
#include <stdio.h>
#include <string>
#include <vector>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cerrno>
#include <algorithm>
#include <sys/stat.h>
/* header for every nvbit tool */
#include "nvbit_tool.h"
/* interface of nvbit */
#include "nvbit.h"
/* nvbit utility functions */
#include "utils/utils.h"
int cudaMemcpy_input_count = 0;
int cudaMemcpy_output_count = 0;
int funcParams_count = 0;
void *recorded_mem;
const void *origin_srcHost;
/* read memory content recorded in record phase from files
* if is_input is 0, it means the mem is copied from device to host
* otherwise, the mem is copied from host to device
* a memory of BtyeCount will be allocated and returned
*/
/* Read one recorded memory blob from the kernel_log directory.
 *
 * is_input != 0 -> host-to-device input  (kernel_log/imem<N>.bin)
 * is_input == 0 -> device-to-host output (kernel_log/omem<N>.bin)
 * The matching per-direction counter is incremented before building the name.
 *
 * Returns a malloc'd buffer of ByteCount bytes holding the file contents;
 * the caller owns it and must free() it.  Exits the process on allocation
 * failure, open failure, or short read.
 */
void *get_recorded_mem(size_t ByteCount, int is_input) {
    /* BUGFIX: the original 25-byte buffer overflowed once the counter grew
     * past 5 digits; 64 bytes holds the path plus any int value, and
     * snprintf guards against truncation-induced overflow. */
    char filename[64];
    if (is_input) {
        cudaMemcpy_input_count++;
        snprintf(filename, sizeof(filename), "kernel_log/imem%d.bin", cudaMemcpy_input_count);
    } else {
        cudaMemcpy_output_count++;
        snprintf(filename, sizeof(filename), "kernel_log/omem%d.bin", cudaMemcpy_output_count);
    }
    void *buffer = malloc(ByteCount);
    if (buffer == NULL) {
        /* BUGFIX: the malloc result was previously used unchecked */
        std::cerr << "failed to allocate " << ByteCount << " bytes.\n";
        exit(1);
    }
    std::ifstream file(filename, std::ios::in | std::ios::binary);
    if (!file.is_open()) {
        std::cerr << strerror(errno) << "failed to open file.\n";
        exit(1);
    }
    file.read((char *) buffer, ByteCount);
    if (!file) {
        std::cerr << "only " << file.gcount() << " could be read from " << filename << std::endl;
        exit(1);
    }
    file.close();
    return buffer;
}
/* compare ptr with output in record phase */
void compare_mem(const void *ptr, size_t ByteCount) {
void *to_compare = get_recorded_mem(ByteCount, 0);
int is_equal = memcmp(ptr, to_compare, ByteCount);
if (is_equal != 0) {
std::cerr << cudaMemcpy_output_count << "th output doesn't match!\n";
}
free(to_compare);
}
/* load arguments from log files
* if it is not a pointer, its value with be saved to kernelParams
* TODO: user defined type is not support
*/
/* Load recorded kernel arguments from kernel_log/param<N>.txt and write the
 * scalar (non-pointer) values back into kernelParams so the replayed launch
 * sees the same arguments as the recorded one.
 *
 * Each log line has the form "<index>,<type>,<value>"; type names appear
 * with spaces removed (e.g. "unsignedlonglongint").  kernelParams[i] points
 * at the storage for argument i.  Pointer arguments and unrecognized types
 * are skipped.  Exits the process if the log file cannot be opened.
 * TODO: user defined type is not support
 */
void replace_nonpointer_arguments(void **kernelParams) {
    /* open log file for this launch; the counter makes the name unique */
    funcParams_count++;
    /* BUGFIX: 25 bytes only left room for a 4-digit counter; 64 bytes plus
     * snprintf is safe for any int value. */
    char filename[64];
    snprintf(filename, sizeof(filename), "kernel_log/param%d.txt", funcParams_count);
    std::ifstream file;
    file.open(filename);
    if (file.fail()) {
        std::cerr << strerror(errno) << "failed to open file.\n";
        exit(1);
    }
    int i;
    std::string trash, type, line;
    while (std::getline(file, line)) {
        /* parse "<index>,<type>,<value>" */
        std::istringstream iss(line);
        iss >> i;
        std::getline(iss, trash, ',');  /* consume separator after the index */
        std::getline(iss, type, ',');
        /* cast kernelParams based on parameter type and assign value to it
         * it will gives the argument of the kernel function
         * refer to https://en.wikipedia.org/wiki/C_data_types
         */
        if (type.find('*') != std::string::npos) {
            // the parameter is a pointer; device pointers are handled elsewhere
            continue;
        } else if (type == "char") {
            char value;
            iss >> value;
            *(((char **) kernelParams)[i]) = value;
        } else if (type == "signedchar") {
            signed char value;
            iss >> value;
            *(((signed char **) kernelParams)[i]) = value;
        } else if (type == "unsignedchar") {
            unsigned char value;
            iss >> value;
            *(((unsigned char **) kernelParams)[i]) = value;
        } else if (type == "short" ||
                   type == "shortint" ||
                   type == "signedshort" ||
                   type == "signedshortint") {
            // signed short
            short value;
            iss >> value;
            *(((short **) kernelParams)[i]) = value;
        } else if (type == "unsignedshort" ||
                   type == "unsignedshortint" ||   /* BUGFIX: space-stripped form, consistent with all other type names */
                   type == "unsigned short int") {
            // unsigned short
            unsigned short value;
            iss >> value;
            *(((unsigned short **) kernelParams)[i]) = value;
        } else if (type == "int" ||
                   type == "signed" ||
                   type == "signedint") {
            // signed int
            int value;
            iss >> value;
            *(((int **) kernelParams)[i]) = value;
        } else if (type == "unsigned" ||
                   type == "unsignedint") {
            // unsigned int
            unsigned value;
            iss >> value;
            *(((unsigned **) kernelParams)[i]) = value;
        } else if (type == "long" ||
                   type == "longint" ||
                   type == "signedlong" ||
                   type == "signedlongint") {
            // signed long
            long value;
            iss >> value;
            *(((long **) kernelParams)[i]) = value;
        } else if (type == "unsignedlong" ||
                   type == "unsignedlongint") {
            // unsigned long
            unsigned long value;
            /* BUGFIX: previously read from `file`, which consumed the next
             * log line instead of this line's value field */
            iss >> value;
            *(((unsigned long **) kernelParams)[i]) = value;
        } else if (type == "longlong" ||
                   type == "longlongint" ||
                   type == "signedlonglong" ||
                   type == "signedlonglongint") {
            // signed long long
            signed long long value;
            iss >> value;
            *(((signed long long **) kernelParams)[i]) = value;
        } else if (type == "unsignedlonglong" ||
                   type == "unsignedlonglongint") {
            // unsigned long long
            unsigned long long value;
            iss >> value;
            *(((unsigned long long **) kernelParams)[i]) = value;
        } else if (type == "float") {
            // float
            float value;
            iss >> value;
            *(((float **) kernelParams)[i]) = value;
        } else if (type == "double") {
            // double
            double value;
            iss >> value;
            *(((double **) kernelParams)[i]) = value;
        } else if (type == "longdouble") {
            // long double
            long double value;
            iss >> value;
            *(((long double **) kernelParams)[i]) = value;
        } else {
            // TODO: implement more types
            continue;
        }
    }
    file.close();
}
/* This is triggered every time a hipMemcpy is called */
/* NVBit callback, triggered on every intercepted driver-API event.
 * Three events drive the replay:
 *   - cuMemcpyDtoH (at exit):  compare the produced output with the recording
 *   - cuMemcpyHtoD (entry + exit): substitute the recorded input, then restore
 *   - cuLaunchKernel (at entry): overwrite scalar kernel args from the log
 */
void nvbit_at_cuda_event(hipCtx_t ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, hipError_t *pStatus) {
if ((cbid == API_CUDA_cuMemcpyDtoH_v2) && is_exit) {
/* after the copy finishes: compare the host buffer with the output
 * saved in the record phase */
cuMemcpyDtoH_v2_params *p = (cuMemcpyDtoH_v2_params *)params;
compare_mem(p->dstHost, p->ByteCount);
} else if (cbid == API_CUDA_cuMemcpyHtoD_v2) {
cuMemcpyHtoD_v2_params *p = (cuMemcpyHtoD_v2_params *)params;
if (!is_exit) {
/* at entry (before the copy runs): swap the source pointer to the
 * recorded buffer so the device receives the recorded input */
recorded_mem = get_recorded_mem(p->ByteCount, 1);
origin_srcHost = p->srcHost;
p->srcHost = recorded_mem;
} else {
/* at exit: restore the caller's pointer and free the buffer
 * allocated by get_recorded_mem */
p->srcHost = origin_srcHost;
free(recorded_mem);
recorded_mem = nullptr;
origin_srcHost = nullptr;
}
} else if ((cbid == API_CUDA_cuLaunchKernel_ptsz ||
cbid == API_CUDA_cuLaunchKernel) && !is_exit) {
/* before the launch: patch scalar arguments from the param log */
cuLaunchKernel_params *p = (cuLaunchKernel_params *)params;
replace_nonpointer_arguments(p->kernelParams);
}
} | 7371989e4fd202ec69744e0a6b280a0caee48460.cu | /*
*
* A NVBit tool, which will replay the program by examine the log file generated in record phase
* input is defined as memory copied from host to device and arguments of kernel
* output is defined as memory copied from device to host
*
* Yineng Yan (yinengy@umich.edu), 2020
*/
#include <stdio.h>
#include <string>
#include <vector>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cerrno>
#include <algorithm>
#include <sys/stat.h>
/* header for every nvbit tool */
#include "nvbit_tool.h"
/* interface of nvbit */
#include "nvbit.h"
/* nvbit utility functions */
#include "utils/utils.h"
int cudaMemcpy_input_count = 0;
int cudaMemcpy_output_count = 0;
int funcParams_count = 0;
void *recorded_mem;
const void *origin_srcHost;
/* read memory content recorded in record phase from files
* if is_input is 0, it means the mem is copied from device to host
* otherwise, the mem is copied from host to device
* a memory of BtyeCount will be allocated and returned
*/
/* Read one recorded memory blob from the kernel_log directory.
 *
 * is_input != 0 -> host-to-device input  (kernel_log/imem<N>.bin)
 * is_input == 0 -> device-to-host output (kernel_log/omem<N>.bin)
 * The matching per-direction counter is incremented before building the name.
 *
 * Returns a malloc'd buffer of ByteCount bytes holding the file contents;
 * the caller owns it and must free() it.  Exits the process on allocation
 * failure, open failure, or short read.
 */
void *get_recorded_mem(size_t ByteCount, int is_input) {
    /* BUGFIX: the original 25-byte buffer overflowed once the counter grew
     * past 5 digits; 64 bytes holds the path plus any int value, and
     * snprintf guards against truncation-induced overflow. */
    char filename[64];
    if (is_input) {
        cudaMemcpy_input_count++;
        snprintf(filename, sizeof(filename), "kernel_log/imem%d.bin", cudaMemcpy_input_count);
    } else {
        cudaMemcpy_output_count++;
        snprintf(filename, sizeof(filename), "kernel_log/omem%d.bin", cudaMemcpy_output_count);
    }
    void *buffer = malloc(ByteCount);
    if (buffer == NULL) {
        /* BUGFIX: the malloc result was previously used unchecked */
        std::cerr << "failed to allocate " << ByteCount << " bytes.\n";
        exit(1);
    }
    std::ifstream file(filename, std::ios::in | std::ios::binary);
    if (!file.is_open()) {
        std::cerr << strerror(errno) << "failed to open file.\n";
        exit(1);
    }
    file.read((char *) buffer, ByteCount);
    if (!file) {
        std::cerr << "only " << file.gcount() << " could be read from " << filename << std::endl;
        exit(1);
    }
    file.close();
    return buffer;
}
/* compare ptr with output in record phase */
/* Check a device-to-host copy against the output recorded in the record
 * phase; on mismatch, report which output differed (non-fatal). */
void compare_mem(const void *ptr, size_t ByteCount) {
    void *recorded = get_recorded_mem(ByteCount, 0);
    if (memcmp(ptr, recorded, ByteCount) != 0)
        std::cerr << cudaMemcpy_output_count << "th output doesn't match!\n";
    free(recorded);
}
/* load arguments from log files
* if it is not a pointer, its value with be saved to kernelParams
* TODO: user defined type is not support
*/
/* Load recorded kernel arguments from kernel_log/param<N>.txt and write the
 * scalar (non-pointer) values back into kernelParams so the replayed launch
 * sees the same arguments as the recorded one.
 *
 * Each log line has the form "<index>,<type>,<value>"; type names appear
 * with spaces removed (e.g. "unsignedlonglongint").  kernelParams[i] points
 * at the storage for argument i.  Pointer arguments and unrecognized types
 * are skipped.  Exits the process if the log file cannot be opened.
 * TODO: user defined type is not support
 */
void replace_nonpointer_arguments(void **kernelParams) {
    /* open log file for this launch; the counter makes the name unique */
    funcParams_count++;
    /* BUGFIX: 25 bytes only left room for a 4-digit counter; 64 bytes plus
     * snprintf is safe for any int value. */
    char filename[64];
    snprintf(filename, sizeof(filename), "kernel_log/param%d.txt", funcParams_count);
    std::ifstream file;
    file.open(filename);
    if (file.fail()) {
        std::cerr << strerror(errno) << "failed to open file.\n";
        exit(1);
    }
    int i;
    std::string trash, type, line;
    while (std::getline(file, line)) {
        /* parse "<index>,<type>,<value>" */
        std::istringstream iss(line);
        iss >> i;
        std::getline(iss, trash, ',');  /* consume separator after the index */
        std::getline(iss, type, ',');
        /* cast kernelParams based on parameter type and assign value to it
         * it will gives the argument of the kernel function
         * refer to https://en.wikipedia.org/wiki/C_data_types
         */
        if (type.find('*') != std::string::npos) {
            // the parameter is a pointer; device pointers are handled elsewhere
            continue;
        } else if (type == "char") {
            char value;
            iss >> value;
            *(((char **) kernelParams)[i]) = value;
        } else if (type == "signedchar") {
            signed char value;
            iss >> value;
            *(((signed char **) kernelParams)[i]) = value;
        } else if (type == "unsignedchar") {
            unsigned char value;
            iss >> value;
            *(((unsigned char **) kernelParams)[i]) = value;
        } else if (type == "short" ||
                   type == "shortint" ||
                   type == "signedshort" ||
                   type == "signedshortint") {
            // signed short
            short value;
            iss >> value;
            *(((short **) kernelParams)[i]) = value;
        } else if (type == "unsignedshort" ||
                   type == "unsignedshortint" ||   /* BUGFIX: space-stripped form, consistent with all other type names */
                   type == "unsigned short int") {
            // unsigned short
            unsigned short value;
            iss >> value;
            *(((unsigned short **) kernelParams)[i]) = value;
        } else if (type == "int" ||
                   type == "signed" ||
                   type == "signedint") {
            // signed int
            int value;
            iss >> value;
            *(((int **) kernelParams)[i]) = value;
        } else if (type == "unsigned" ||
                   type == "unsignedint") {
            // unsigned int
            unsigned value;
            iss >> value;
            *(((unsigned **) kernelParams)[i]) = value;
        } else if (type == "long" ||
                   type == "longint" ||
                   type == "signedlong" ||
                   type == "signedlongint") {
            // signed long
            long value;
            iss >> value;
            *(((long **) kernelParams)[i]) = value;
        } else if (type == "unsignedlong" ||
                   type == "unsignedlongint") {
            // unsigned long
            unsigned long value;
            /* BUGFIX: previously read from `file`, which consumed the next
             * log line instead of this line's value field */
            iss >> value;
            *(((unsigned long **) kernelParams)[i]) = value;
        } else if (type == "longlong" ||
                   type == "longlongint" ||
                   type == "signedlonglong" ||
                   type == "signedlonglongint") {
            // signed long long
            signed long long value;
            iss >> value;
            *(((signed long long **) kernelParams)[i]) = value;
        } else if (type == "unsignedlonglong" ||
                   type == "unsignedlonglongint") {
            // unsigned long long
            unsigned long long value;
            iss >> value;
            *(((unsigned long long **) kernelParams)[i]) = value;
        } else if (type == "float") {
            // float
            float value;
            iss >> value;
            *(((float **) kernelParams)[i]) = value;
        } else if (type == "double") {
            // double
            double value;
            iss >> value;
            *(((double **) kernelParams)[i]) = value;
        } else if (type == "longdouble") {
            // long double
            long double value;
            iss >> value;
            *(((long double **) kernelParams)[i]) = value;
        } else {
            // TODO: implement more types
            continue;
        }
    }
    file.close();
}
/* This is triggered every time a cudaMemcpy is called */
/* NVBit callback, triggered on every intercepted CUDA driver-API event.
 * Three events drive the replay:
 *   - cuMemcpyDtoH (at exit):  compare the produced output with the recording
 *   - cuMemcpyHtoD (entry + exit): substitute the recorded input, then restore
 *   - cuLaunchKernel (at entry): overwrite scalar kernel args from the log
 */
void nvbit_at_cuda_event(CUcontext ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, CUresult *pStatus) {
if ((cbid == API_CUDA_cuMemcpyDtoH_v2) && is_exit) {
/* after the copy finishes: compare the host buffer with the output
 * saved in the record phase */
cuMemcpyDtoH_v2_params *p = (cuMemcpyDtoH_v2_params *)params;
compare_mem(p->dstHost, p->ByteCount);
} else if (cbid == API_CUDA_cuMemcpyHtoD_v2) {
cuMemcpyHtoD_v2_params *p = (cuMemcpyHtoD_v2_params *)params;
if (!is_exit) {
/* at entry (before the copy runs): swap the source pointer to the
 * recorded buffer so the device receives the recorded input */
recorded_mem = get_recorded_mem(p->ByteCount, 1);
origin_srcHost = p->srcHost;
p->srcHost = recorded_mem;
} else {
/* at exit: restore the caller's pointer and free the buffer
 * allocated by get_recorded_mem */
p->srcHost = origin_srcHost;
free(recorded_mem);
recorded_mem = nullptr;
origin_srcHost = nullptr;
}
} else if ((cbid == API_CUDA_cuLaunchKernel_ptsz ||
cbid == API_CUDA_cuLaunchKernel) && !is_exit) {
/* before the launch: patch scalar arguments from the param log */
cuLaunchKernel_params *p = (cuLaunchKernel_params *)params;
replace_nonpointer_arguments(p->kernelParams);
}
}
9399ed770194121b47a3dd650ce3e0288094cbe7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MarchingCubes.h"
#include "TsdfVolume.h"
#include "cudpp\thrust_wrapper.h"
#include "GpuMesh.h"
#include "device_utils.h"
namespace dfusion
{
#pragma region --marching cubes table data
enum{
TableSize = 256,
TableSize2 = 16
};
__constant__ int g_edgeTable[TableSize] = {
0x0, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33, 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa, 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66, 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff, 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55, 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55, 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66, 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa, 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33, 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99, 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 };
__constant__ char g_triTable[TableSize][TableSize2] =
{ { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
{ 8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1 },
{ 3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1 },
{ 4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
{ 4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1 },
{ 9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1 },
{ 10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1 },
{ 5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
{ 5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1 },
{ 8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1 },
{ 2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
{ 2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1 },
{ 11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1 },
{ 5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1 },
{ 11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1 },
{ 11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1 },
{ 2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1 },
{ 6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
{ 3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1 },
{ 6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
{ 6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1 },
{ 8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1 },
{ 7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1 },
{ 3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
{ 0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1 },
{ 9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1 },
{ 8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
{ 5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1 },
{ 0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1 },
{ 6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1 },
{ 10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
{ 1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1 },
{ 0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1 },
{ 3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
{ 6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1 },
{ 9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1 },
{ 8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1 },
{ 3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1 },
{ 10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1 },
{ 10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
{ 2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1 },
{ 7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
{ 2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1 },
{ 1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1 },
{ 11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1 },
{ 8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1 },
{ 0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1 },
{ 7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1 },
{ 7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1 },
{ 10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1 },
{ 0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1 },
{ 7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1 },
{ 6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1 },
{ 4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1 },
{ 10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1 },
{ 8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1 },
{ 1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1 },
{ 10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1 },
{ 10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1 },
{ 9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1 },
{ 7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1 },
{ 3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1 },
{ 7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1 },
{ 3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1 },
{ 6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1 },
{ 9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1 },
{ 1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1 },
{ 4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1 },
{ 7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1 },
{ 6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1 },
{ 0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1 },
{ 6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1 },
{ 0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1 },
{ 11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1 },
{ 6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1 },
{ 5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1 },
{ 9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1 },
{ 1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1 },
{ 10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1 },
{ 0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1 },
{ 11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1 },
{ 9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1 },
{ 7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1 },
{ 2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1 },
{ 9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1 },
{ 9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1 },
{ 1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1 },
{ 0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1 },
{ 10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1 },
{ 2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1 },
{ 0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1 },
{ 0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1 },
{ 9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1 },
{ 5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1 },
{ 5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1 },
{ 8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1 },
{ 9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1 },
{ 1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1 },
{ 3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1 },
{ 4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1 },
{ 9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1 },
{ 11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1 },
{ 2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1 },
{ 9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1 },
{ 3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1 },
{ 1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1 },
{ 4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1 },
{ 0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1 },
{ 1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 } };
// number of vertices for each case above
// Entry [c] is the number of triangle vertices (3 per triangle, max 15)
// emitted for cube configuration c; 16 cases per row, canonical NVIDIA
// marchingCubes table.
__constant__ char g_numVertsTable[TableSize] =
{
	 0,  3,  3,  6,  3,  6,  6,  9,  3,  6,  6,  9,  6,  9,  9,  6,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 6,  9,  9,  6,  9, 12, 12,  9,  9, 12, 12,  9, 12, 15, 15,  6,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 6,  9,  9, 12,  9, 12, 12, 15,  9, 12, 12, 15, 12, 15, 15, 12,
	 6,  9,  9, 12,  9, 12,  6,  9,  9, 12, 12, 15, 12, 15,  9,  6,
	 9, 12, 12,  9, 12, 15,  9,  6, 12, 15, 15, 12, 15,  6, 12,  3,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 6,  9,  9, 12,  9, 12, 12, 15,  9,  6, 12,  9, 12,  9, 15,  6,
	 6,  9,  9, 12,  9, 12, 12, 15,  9, 12, 12, 15, 12, 15, 15, 12,
	 9, 12, 12,  9, 12, 15, 15, 12, 12,  9, 15,  6, 15, 12,  6,  3,
	 6,  9,  9, 12,  9, 12, 12, 15,  9, 12, 12, 15,  6,  9,  9,  6,
	 9, 12, 12, 15, 12, 15, 15,  6, 12,  9, 15, 12,  9,  6, 12,  3,
	 9, 12, 12, 15, 12, 15,  9, 12, 12, 15, 15,  6,  9, 12,  6,  3,
	 6,  9,  9,  6,  9, 12,  6,  3,  9,  6, 12,  3,  6,  3,  3,  0,
};
#pragma endregion
#pragma region --classifyVoxel
// Running total of occupied voxels found so far (device-side accumulator,
// incremented one atomicAdd per warp by OccupiedVoxels::operator()).
__device__ int global_count = 0;
// Final occupied-voxel count, clamped to tile.max_num_activeVoxels by the
// last block to finish; read back by the host in classifyVoxel.
__device__ int output_count;
// Number of thread blocks that have completed; used to detect the last block
// so it can publish output_count and reset the accumulators.
__device__ unsigned int blocks_done = 0;
// Kernel functor: classifies one voxel per thread within the given tile.
// A voxel is "occupied" when all 8 TSDF corner samples carry sufficient
// weight and the resulting cube configuration produces triangles. Occupied
// voxels are stream-compacted (warp-level exclusive scan + one global
// atomicAdd per warp) into voxels_indeces, with the per-voxel output vertex
// count written to vetexes_number. The last block to finish publishes the
// clamped total into output_count and resets the device accumulators.
struct OccupiedVoxels
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
CTA_SIZE_Z = 2,
// total threads per block
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y * CTA_SIZE_Z,
WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE
};
// output: compacted linear voxel indices (within the tile, at tile.level)
mutable unsigned int* voxels_indeces;
// output: number of triangle vertices each occupied voxel will emit
mutable unsigned int* vetexes_number;
// TSDF volume, sampled at the 8 cube corners
hipTextureObject_t tex;
MarchingCubes::Tile tile;
// surface crossing value used for the marching-cubes classification
float isoValue;
// corners whose TSDF weight is below this are treated as missing data
float minWeights;
__device__ __forceinline__ void operator () () const
{
// tile-local thread coordinates, then volume coordinates at the tile's
// level of detail (stride s = 2^level voxels per step)
const int tx = threadIdx.x + blockIdx.x * CTA_SIZE_X;
const int ty = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
const int tz = threadIdx.z + blockIdx.z * CTA_SIZE_Z;
const int x = (tx << tile.level) + tile.begin.x;
const int y = (ty << tile.level) + tile.begin.y;
const int z = (tz << tile.level) + tile.begin.z;
const int rx = ((tile.end.x - tile.begin.x) >> tile.level);
const int ry = ((tile.end.y - tile.begin.y) >> tile.level);
const int s = (1 << tile.level);
// early exit is safe here: no block-wide barrier in this kernel
if (x >= tile.end.x || y >= tile.end.y || z >= tile.end.z)
return;
int ftid = Block::flattenedThreadId();
int warp_id = Warp::id();
int lane_id = Warp::laneId();
volatile __shared__ int warps_buffer[WARPS_COUNT];
// sample the 8 cube corners; a corner with insufficient weight is forced
// to 0, which marks it as "no data" for the all-corners-valid test below
float field[8];
float2 tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + 0, z + 0));
field[0] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + 0, z + 0));
field[1] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + s, z + 0));
field[2] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + s, z + 0));
field[3] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + 0, z + s));
field[4] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + 0, z + s));
field[5] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + s, z + s));
field[6] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + s, z + s));
field[7] = tdata.x * (tdata.y >= minWeights);
// classify only if all 8 corners hold valid data; otherwise cubeindex
// stays 0 => g_numVertsTable[0] == 0 => voxel is skipped
int cubeindex = 0;
if (field[0] && field[1] && field[2] && field[3] && field[4]
&& field[5] && field[6] && field[7])// exactly 0 means no value, thus should be ignored
{
cubeindex |= (int(field[0] < isoValue) << 0);
cubeindex |= (int(field[1] < isoValue) << 1);// * 2;
cubeindex |= (int(field[2] < isoValue) << 2);// * 4;
cubeindex |= (int(field[3] < isoValue) << 3);// * 8;
cubeindex |= (int(field[4] < isoValue) << 4);// * 16;
cubeindex |= (int(field[5] < isoValue) << 5);// * 32;
cubeindex |= (int(field[6] < isoValue) << 6);// * 64;
cubeindex |= (int(field[7] < isoValue) << 7);// * 128;
}
int numVerts = g_numVertsTable[cubeindex];
// NOTE(review): mask-less __ballot and the unsynchronized read of
// warps_buffer right after the lane-0 write rely on implicit
// warp-synchronous execution (pre-Volta semantics / a single wavefront);
// confirm this holds for the targeted architectures (on CUDA Volta+ the
// *_sync variants plus __syncwarp would be required).
int total = __popc(__ballot(numVerts > 0));
if (total)
{
// lane 0 reserves a contiguous range of output slots for the warp
if (lane_id == 0)
{
int old = atomicAdd(&global_count, total);
warps_buffer[warp_id] = old;
}
int old_global_voxels_count = warps_buffer[warp_id];
// per-lane offset within the warp's reserved range
int offs = Warp::binaryExclScan(__ballot(numVerts > 0));
// drop voxels beyond the preallocated output capacity
if (old_global_voxels_count + offs < tile.max_num_activeVoxels && numVerts > 0)
{
voxels_indeces[old_global_voxels_count + offs] = ry*rx * tz + ty*rx + tx;
vetexes_number[old_global_voxels_count + offs] = numVerts;
}
}
/////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
// atomicInc counts finished blocks, wrapping back to 0 at total_blocks
unsigned int value = atomicInc(&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
// publish the clamped result and reset state for the next launch
output_count = min(tile.max_num_activeVoxels, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator () */
};
__global__ void getOccupiedVoxelsKernel(const OccupiedVoxels ov) { ov(); }
// Total of an array that was exclusive-scanned: since d_scan[n-1] holds the
// sum of the first n-1 elements, the grand total is d_scan[n-1] + d_ary[n-1].
// Both reads are single-element device-to-host copies; returns 0 for n == 0.
static unsigned int get_scanned_sum(unsigned int* d_ary, unsigned int* d_scan, int n)
{
	if (n == 0)
		return 0;
	unsigned int lastInput = 0;
	unsigned int lastPrefix = 0;
	cudaSafeCall(hipMemcpy(&lastInput, d_ary + (n - 1),
		sizeof(unsigned int), hipMemcpyDeviceToHost),
		"get_scanned_sum 1");
	cudaSafeCall(hipMemcpy(&lastPrefix, d_scan + (n - 1),
		sizeof(unsigned int), hipMemcpyDeviceToHost),
		"get_scanned_sum 2");
	return lastInput + lastPrefix;
}
// Pass 1 of marching cubes: find the occupied voxels of `tile`, count the
// vertices each will produce, and exclusive-scan those counts so pass 2
// (generateTriangles) knows every voxel's output offset. On return,
// tile.num_activeVoxels and tile.nverts are filled in.
void MarchingCubes::classifyVoxel(Tile& tile)
{
// reset the device-side counters used by OccupiedVoxels
int zero_mem = 0;
cudaSafeCall(hipMemcpyToSymbol(output_count, &zero_mem, sizeof(int)),
"MarchingCubes::classifyVoxel 1");
cudaSafeCall(hipMemcpyToSymbol(global_count, &zero_mem, sizeof(int)),
"MarchingCubes::classifyVoxel 2");
// blocks_done is unsigned int; sizeof(int) matches, so zeroing is fine
cudaSafeCall(hipMemcpyToSymbol(blocks_done, &zero_mem, sizeof(int)),
"MarchingCubes::classifyVoxel 3");
OccupiedVoxels ov;
ov.voxels_indeces = m_compVoxelArray.ptr();
ov.vetexes_number = m_voxelVerts.ptr();
ov.tex = m_volTex;
ov.tile = tile;
ov.isoValue = m_param.marching_cube_isoValue;
ov.minWeights = m_param.marchingCube_min_valied_weight;
// one thread per voxel at the tile's level of detail
dim3 block(OccupiedVoxels::CTA_SIZE_X, OccupiedVoxels::CTA_SIZE_Y,
OccupiedVoxels::CTA_SIZE_Z);
dim3 grid(divUp((tile.end.x - tile.begin.x) >> tile.level, block.x),
divUp((tile.end.y - tile.begin.y) >> tile.level, block.y),
divUp((tile.end.z - tile.begin.z) >> tile.level, block.z));
getOccupiedVoxelsKernel << <grid, block >> >(ov);
cudaSafeCall(hipGetLastError(),
"MarchingCubes::classifyVoxel getOccupiedVoxelsKernel");
// the symbol read below requires the kernel to have finished
cudaSafeCall(hipDeviceSynchronize(),
"MarchingCubes::classifyVoxel getOccupiedVoxelsKernel");
cudaSafeCall(hipMemcpyFromSymbol(&tile.num_activeVoxels, output_count, sizeof(int)),
"MarchingCubes::classifyVoxel 4");
// hitting the cap means voxels were dropped by the kernel's bounds check
if (tile.num_activeVoxels == tile.max_num_activeVoxels)
{
printf("warning: memory limit achieved in marching cube, you may enlarge \
marching_cube_max_activeVoxel_ratio in Param()\n");
}
// scan to get total number of vertices
thrust_wrapper::exclusive_scan(m_voxelVerts.ptr(), m_voxelVertsScan.ptr(), tile.num_activeVoxels);
tile.nverts = get_scanned_sum(m_voxelVerts.ptr(), m_voxelVertsScan.ptr(), tile.num_activeVoxels);
}
#pragma endregion
#pragma region --generate triangles
enum{
// threads per block for generateTrianglesKernel; also the column stride of
// the shared-memory vertlist/clorlist scratch arrays inside that kernel
GEN_TRI_N_THREADS = 32
};
// Linear interpolation between two points: a + t*(b - a), per component.
__device__ __forceinline__ float3 lerp(float3 a, float3 b, float t)
{
	return make_float3(a.x + t * (b.x - a.x),
	                   a.y + t * (b.y - a.y),
	                   a.z + t * (b.z - a.z));
}
// Linear interpolation between two 4-vectors: a + t*(b - a), per component.
__device__ __forceinline__ float4 lerp(float4 a, float4 b, float t)
{
	return make_float4(a.x + t * (b.x - a.x),
	                   a.y + t * (b.y - a.y),
	                   a.z + t * (b.z - a.z),
	                   a.w + t * (b.w - a.w));
}
// compute interpolated vertex along an edge
__device__ __forceinline__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1)
{
	// Parametric crossing point of the scalar field along edge p0 -> p1.
	// NOTE(review): assumes f0 != f1. The kernels below interpolate all 12
	// edges unconditionally, but results are only consumed for edges listed
	// in g_triTable, where the field values straddle the isolevel.
	const float frac = (isolevel - f0) / (f1 - f0);
	return dfusion::lerp(p0, p1, frac);
}
// Interpolates the fused RGBA color to the surface crossing point of an edge,
// then scales by COLOR_FUSION_BRIGHTNESS to brighten the output mesh colors.
__device__ __forceinline__ float4 colorInterp(float isolevel, float4 p0, float4 p1, float f0, float f1)
{
	const float frac = (isolevel - f0) / (f1 - f0);
	return dfusion::lerp(p0, p1, frac) * COLOR_FUSION_BRIGHTNESS;
}
// calculate triangle normal
__device__ __forceinline__ float3 calcNormal(float3 *v0, float3 *v1, float3 *v2)
{
	// Two edge vectors spanning the triangle; their normalized cross product
	// is the (flat) face normal.
	// note - it's faster to perform normalization in vertex shader rather than here
	const float3 e01 = *v1 - *v0;
	const float3 e02 = *v2 - *v0;
	return normalize(cross(e01, e02));
}
// version that calculates flat surface normal for each triangle
// Pass 2 of marching cubes: each thread handles up to 8 compacted voxels
// (strided by blockDim.x). For every active voxel it re-samples the TSDF at
// the 8 cube corners, rebuilds the cube configuration, interpolates the up to
// 12 edge crossings into shared-memory scratch, and writes flat-shaded
// triangle vertices (and optionally colors) at the offsets provided by the
// exclusive scan numVertsScanned.
// Expected launch: 1D blocks of GEN_TRI_N_THREADS threads, grid sized as in
// MarchingCubes::generateTriangles (one thread per 8 active voxels).
__global__ void generateTrianglesKernel(GpuMesh::PointType *pos, GpuMesh::PointType *norm,
hipTextureObject_t tex, MarchingCubes::Tile tile,
unsigned int *compactedVoxelArray, unsigned int *numVertsScanned, float isoValue, float minWeights
#ifdef ENABLE_COLOR_FUSION
,GpuMesh::PointType* color
#endif
)
{
// flat block id over a 2D grid; each block covers blockDim.x*8 voxels
unsigned int blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
unsigned int tid = __mul24(blockId, blockDim.x<<3) + threadIdx.x;
// tile extents at the current level of detail; s = voxel stride, svsz = cube
// edge length in world units
const int rx = ((tile.end.x - tile.begin.x) >> tile.level);
const int ry = ((tile.end.y - tile.begin.y) >> tile.level);
const int s = (1 << tile.level);
const float svsz = tile.voxelSize*s;
const int rxry = rx*ry;
for (int block_iter = 0; block_iter < 8; block_iter++, tid += blockDim.x)
{
// cannot return due to __syncthreads()
if (tid < tile.num_activeVoxels)
{
unsigned int voxelId = compactedVoxelArray[tid];
// compute position in 3d grid
uint3 gridPos;
gridPos.z = voxelId / rxry;
gridPos.y = (voxelId - gridPos.z*rxry) / rx;
gridPos.x = voxelId % rx;
// convert tile-local level coordinates back to volume coordinates
gridPos.x = tile.begin.x + (gridPos.x << tile.level);
gridPos.y = tile.begin.y + (gridPos.y << tile.level);
gridPos.z = tile.begin.z + (gridPos.z << tile.level);
// calculate cell vertex positions
float3 v[8];
float field[8];
v[0] = make_float3(tile.origion.x + gridPos.x * tile.voxelSize,
tile.origion.y + gridPos.y * tile.voxelSize,
tile.origion.z + gridPos.z * tile.voxelSize);
v[1] = make_float3(v[0].x + svsz, v[0].y, v[0].z);
v[2] = make_float3(v[0].x + svsz, v[0].y + svsz, v[0].z);
v[3] = make_float3(v[0].x, v[0].y + svsz, v[0].z);
v[4] = make_float3(v[0].x, v[0].y, v[0].z + svsz);
v[5] = make_float3(v[0].x + svsz, v[0].y, v[0].z + svsz);
v[6] = make_float3(v[0].x + svsz, v[0].y + svsz, v[0].z + svsz);
v[7] = make_float3(v[0].x, v[0].y + svsz, v[0].z + svsz);
// sample the 8 corners; low-weight samples are forced to 0 ("no data"),
// mirroring the classification pass in OccupiedVoxels
#ifdef ENABLE_COLOR_FUSION
float4 c[8];
float2 tdata;
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + 0), tdata, c[0]);
field[0] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + 0), tdata, c[1]);
field[1] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + 0), tdata, c[2]);
field[2] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + 0), tdata, c[3]);
field[3] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + s), tdata, c[4]);
field[4] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + s), tdata, c[5]);
field[5] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + s), tdata, c[6]);
field[6] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + s), tdata, c[7]);
field[7] = tdata.x * (tdata.y >= minWeights);
#else
float2 tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + 0));
field[0] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + 0));
field[1] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + 0));
field[2] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + 0));
field[3] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + s));
field[4] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + s));
field[5] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + s));
field[6] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + s));
field[7] = tdata.x * (tdata.y >= minWeights);
#endif
// recalculate flag, faster than store in global memory
int cubeindex = 0;
if (field[0] && field[1] && field[2] && field[3] && field[4]
&& field[5] && field[6] && field[7])// exactly 0 means no value, thus should be ignored
{
cubeindex |= (int(field[0] < isoValue) << 0);
cubeindex |= (int(field[1] < isoValue) << 1);// * 2;
cubeindex |= (int(field[2] < isoValue) << 2);// * 4;
cubeindex |= (int(field[3] < isoValue) << 3);// * 8;
cubeindex |= (int(field[4] < isoValue) << 4);// * 16;
cubeindex |= (int(field[5] < isoValue) << 5);// * 32;
cubeindex |= (int(field[6] < isoValue) << 6);// * 64;
cubeindex |= (int(field[7] < isoValue) << 7);// * 128;
}
// find the vertices where the surface intersects the cube
// use shared memory to avoid using local
// (each thread writes and later reads only its own column threadIdx.x)
__shared__ float3 vertlist[12 * GEN_TRI_N_THREADS];
vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[GEN_TRI_N_THREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[(GEN_TRI_N_THREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[(GEN_TRI_N_THREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[(GEN_TRI_N_THREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[(GEN_TRI_N_THREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[(GEN_TRI_N_THREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[(GEN_TRI_N_THREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[(GEN_TRI_N_THREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[(GEN_TRI_N_THREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[(GEN_TRI_N_THREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[(GEN_TRI_N_THREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
#ifdef ENABLE_COLOR_FUSION
__shared__ float4 clorlist[12 * GEN_TRI_N_THREADS];
clorlist[threadIdx.x] = colorInterp(isoValue, c[0], c[1], field[0], field[1]);
clorlist[GEN_TRI_N_THREADS + threadIdx.x] = colorInterp(isoValue, c[1], c[2], field[1], field[2]);
clorlist[(GEN_TRI_N_THREADS * 2) + threadIdx.x] = colorInterp(isoValue, c[2], c[3], field[2], field[3]);
clorlist[(GEN_TRI_N_THREADS * 3) + threadIdx.x] = colorInterp(isoValue, c[3], c[0], field[3], field[0]);
clorlist[(GEN_TRI_N_THREADS * 4) + threadIdx.x] = colorInterp(isoValue, c[4], c[5], field[4], field[5]);
clorlist[(GEN_TRI_N_THREADS * 5) + threadIdx.x] = colorInterp(isoValue, c[5], c[6], field[5], field[6]);
clorlist[(GEN_TRI_N_THREADS * 6) + threadIdx.x] = colorInterp(isoValue, c[6], c[7], field[6], field[7]);
clorlist[(GEN_TRI_N_THREADS * 7) + threadIdx.x] = colorInterp(isoValue, c[7], c[4], field[7], field[4]);
clorlist[(GEN_TRI_N_THREADS * 8) + threadIdx.x] = colorInterp(isoValue, c[0], c[4], field[0], field[4]);
clorlist[(GEN_TRI_N_THREADS * 9) + threadIdx.x] = colorInterp(isoValue, c[1], c[5], field[1], field[5]);
clorlist[(GEN_TRI_N_THREADS * 10) + threadIdx.x] = colorInterp(isoValue, c[2], c[6], field[2], field[6]);
clorlist[(GEN_TRI_N_THREADS * 11) + threadIdx.x] = colorInterp(isoValue, c[3], c[7], field[3], field[7]);
#endif
// NOTE(review): this barrier sits inside "if (tid < tile.num_activeVoxels)",
// so a block with a partially-active tail diverges at a block-wide barrier
// (undefined behavior on CUDA). Since each thread only reads its own
// vertlist/clorlist column, the barrier also appears unnecessary; consider
// removing it or hoisting it out of the conditional -- confirm before changing.
__syncthreads();
// output triangle vertices
unsigned int numVerts = g_numVertsTable[cubeindex];
for (int i = 0; i < numVerts; i += 3)
{
// output offset for this voxel's i-th triangle (from the exclusive scan)
unsigned int index = numVertsScanned[tid] + i;
float3 *v[3];
#ifdef ENABLE_COLOR_FUSION
float4 *c[3];
#endif
// gather the 3 edge vertices; reversed (2-k) to flip triangle winding
for (int k = 0; k < 3; k++)
{
unsigned int edge = g_triTable[cubeindex][i + k];
v[2-k] = &vertlist[(edge*GEN_TRI_N_THREADS) + threadIdx.x];
#ifdef ENABLE_COLOR_FUSION
c[2-k] = &clorlist[(edge*GEN_TRI_N_THREADS) + threadIdx.x];
#endif
}
// calculate triangle surface normal
float3 n = calcNormal(v[0], v[1], v[2]);
// guard the 3 writes below against running past the output buffer
if (index < tile.nverts - 2)
{
pos[index] = GpuMesh::to_point(*v[0]);
norm[index] = GpuMesh::to_point(n);
pos[index + 1] = GpuMesh::to_point(*v[1]);
norm[index + 1] = GpuMesh::to_point(n);
pos[index + 2] = GpuMesh::to_point(*v[2]);
norm[index + 2] = GpuMesh::to_point(n);
#ifdef ENABLE_COLOR_FUSION
color[index] = *c[0];
color[index + 1] = *c[1];
color[index + 2] = *c[2];
#endif
}
}// end for i
}// end if tid < activeVoxels
}// end for block_iter
}
// Pass 2 driver: allocates the output mesh (tile.nverts vertices, computed by
// classifyVoxel) and launches generateTrianglesKernel. Each thread of the
// kernel consumes 8 compacted voxels, hence the grid division by block.x<<3.
void MarchingCubes::generateTriangles(const Tile& tile, GpuMesh& result)
{
result.create(tile.nverts);
if (tile.nverts == 0)
return;
dim3 block(GEN_TRI_N_THREADS);
dim3 grid(divUp(tile.num_activeVoxels, block.x<<3));
// map the mesh buffers for writing from the kernel
result.lockVertsNormals();
generateTrianglesKernel << <grid, block >> >(
result.verts(), result.normals(),
m_volTex, tile,
m_compVoxelArray, m_voxelVertsScan,
m_param.marching_cube_isoValue,
m_param.marchingCube_min_valied_weight
#ifdef ENABLE_COLOR_FUSION
,result.colors()
#endif
);
cudaSafeCall(hipGetLastError(), "generateTriangles");
result.unlockVertsNormals();
}
#pragma endregion
} | 9399ed770194121b47a3dd650ce3e0288094cbe7.cu | #include "MarchingCubes.h"
#include "TsdfVolume.h"
#include "cudpp\thrust_wrapper.h"
#include "GpuMesh.h"
#include "device_utils.h"
namespace dfusion
{
#pragma region --marching cubes table data
enum{
TableSize = 256,
TableSize2 = 16
};
// For each of the 256 cube configurations, a 12-bit mask telling which cube
// edges the isosurface crosses (bit i set => edge i is intersected).
// Canonical marching-cubes edge table.
__constant__ int g_edgeTable[TableSize] = {
0x0, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33, 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa, 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66, 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff, 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55, 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55, 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66, 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa, 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33, 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99, 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 };
__constant__ char g_triTable[TableSize][TableSize2] =
{ { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
{ 8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1 },
{ 3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1 },
{ 4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
{ 4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1 },
{ 9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1 },
{ 10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1 },
{ 5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
{ 5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1 },
{ 8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1 },
{ 2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
{ 2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1 },
{ 11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1 },
{ 5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1 },
{ 11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1 },
{ 11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1 },
{ 2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1 },
{ 6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
{ 3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1 },
{ 6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
{ 6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1 },
{ 8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1 },
{ 7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1 },
{ 3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
{ 0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1 },
{ 9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1 },
{ 8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
{ 5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1 },
{ 0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1 },
{ 6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1 },
{ 10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
{ 1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1 },
{ 0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1 },
{ 3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
{ 6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1 },
{ 9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1 },
{ 8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1 },
{ 3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1 },
{ 10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1 },
{ 10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
{ 2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1 },
{ 7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
{ 2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1 },
{ 1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1 },
{ 11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1 },
{ 8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1 },
{ 0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1 },
{ 7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1 },
{ 7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1 },
{ 10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1 },
{ 0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1 },
{ 7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1 },
{ 6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1 },
{ 4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1 },
{ 10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1 },
{ 8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1 },
{ 1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1 },
{ 10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1 },
{ 10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1 },
{ 9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1 },
{ 7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1 },
{ 3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1 },
{ 7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1 },
{ 3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1 },
{ 6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1 },
{ 9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1 },
{ 1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1 },
{ 4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1 },
{ 7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1 },
{ 6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1 },
{ 0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1 },
{ 6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1 },
{ 0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1 },
{ 11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1 },
{ 6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1 },
{ 5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1 },
{ 9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1 },
{ 1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1 },
{ 10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1 },
{ 0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1 },
{ 11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1 },
{ 9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1 },
{ 7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1 },
{ 2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1 },
{ 9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1 },
{ 9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1 },
{ 1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1 },
{ 0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1 },
{ 10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1 },
{ 2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1 },
{ 0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1 },
{ 0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1 },
{ 9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1 },
{ 5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1 },
{ 5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1 },
{ 8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1 },
{ 9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1 },
{ 1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1 },
{ 3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1 },
{ 4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1 },
{ 9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1 },
{ 11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1 },
{ 2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1 },
{ 9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1 },
{ 3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1 },
{ 1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1 },
{ 4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1 },
{ 0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1 },
{ 1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 } };
// Number of triangle vertices emitted for each of the 256 marching-cubes
// cube configurations (row k of g_triTable above). Entries are multiples
// of 3 in 0..15, i.e. at most 5 triangles per cell.
__constant__ char g_numVertsTable[TableSize] =
{
	 0,  3,  3,  6,  3,  6,  6,  9,  3,  6,  6,  9,  6,  9,  9,  6,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 6,  9,  9,  6,  9, 12, 12,  9,  9, 12, 12,  9, 12, 15, 15,  6,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 6,  9,  9, 12,  9, 12, 12, 15,  9, 12, 12, 15, 12, 15, 15, 12,
	 6,  9,  9, 12,  9, 12,  6,  9,  9, 12, 12, 15, 12, 15,  9,  6,
	 9, 12, 12,  9, 12, 15,  9,  6, 12, 15, 15, 12, 15,  6, 12,  3,
	 3,  6,  6,  9,  6,  9,  9, 12,  6,  9,  9, 12,  9, 12, 12,  9,
	 6,  9,  9, 12,  9, 12, 12, 15,  9,  6, 12,  9, 12,  9, 15,  6,
	 6,  9,  9, 12,  9, 12, 12, 15,  9, 12, 12, 15, 12, 15, 15, 12,
	 9, 12, 12,  9, 12, 15, 15, 12, 12,  9, 15,  6, 15, 12,  6,  3,
	 6,  9,  9, 12,  9, 12, 12, 15,  9, 12, 12, 15,  6,  9,  9,  6,
	 9, 12, 12, 15, 12, 15, 15,  6, 12,  9, 15, 12,  9,  6, 12,  3,
	 9, 12, 12, 15, 12, 15,  9, 12, 12, 15, 15,  6,  9, 12,  6,  3,
	 6,  9,  9,  6,  9, 12,  6,  3,  9,  6, 12,  3,  6,  3,  3,  0,
};
#pragma endregion
#pragma region --classifyVoxel
// Running total of occupied (triangle-producing) voxels, accumulated across blocks.
__device__ int global_count = 0;
// Final active-voxel count, clamped to the tile capacity; written by the last block.
__device__ int output_count;
// Number of thread blocks that finished; used to detect the last block.
__device__ unsigned int blocks_done = 0;
// Functor executed by getOccupiedVoxelsKernel (one invocation per thread).
// Classifies every voxel of the tile against the marching-cubes tables and
// stream-compacts the flat indices of "occupied" voxels (those that will emit
// triangles) into voxels_indeces, with the per-voxel vertex count in
// vetexes_number. Uses warp-aggregated atomics for the compaction.
struct OccupiedVoxels
{
	enum
	{
		CTA_SIZE_X = 32,
		CTA_SIZE_Y = 8,
		CTA_SIZE_Z = 2,
		CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y * CTA_SIZE_Z,
		WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE
	};
	// Output: compacted flat indices of occupied voxels (device memory).
	mutable unsigned int* voxels_indeces;
	// Output: triangle-vertex count for each occupied voxel.
	mutable unsigned int* vetexes_number;
	cudaTextureObject_t tex;      // TSDF volume texture
	MarchingCubes::Tile tile;     // sub-volume (with level-of-detail) to scan
	float isoValue;               // iso-surface threshold
	float minWeights;             // samples with weight below this are zeroed out
	__device__ __forceinline__ void operator () () const
	{
		const int tx = threadIdx.x + blockIdx.x * CTA_SIZE_X;
		const int ty = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
		const int tz = threadIdx.z + blockIdx.z * CTA_SIZE_Z;
		// volume coordinates: thread coords scaled by 2^level, offset by tile origin
		const int x = (tx << tile.level) + tile.begin.x;
		const int y = (ty << tile.level) + tile.begin.y;
		const int z = (tz << tile.level) + tile.begin.z;
		const int rx = ((tile.end.x - tile.begin.x) >> tile.level);
		const int ry = ((tile.end.y - tile.begin.y) >> tile.level);
		const int s = (1 << tile.level);   // cube edge length in voxels at this LOD
		if (x >= tile.end.x || y >= tile.end.y || z >= tile.end.z)
			return;
		int ftid = Block::flattenedThreadId();
		int warp_id = Warp::id();
		int lane_id = Warp::laneId();
		// one global output offset per warp, filled by each warp's lane 0 below
		volatile __shared__ int warps_buffer[WARPS_COUNT];
		// Sample the 8 cube corners; the multiply zeroes out samples whose
		// weight fails the minWeights test.
		float field[8];
		float2 tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + 0, z + 0));
		field[0] = tdata.x * (tdata.y >= minWeights);
		tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + 0, z + 0));
		field[1] = tdata.x * (tdata.y >= minWeights);
		tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + s, z + 0));
		field[2] = tdata.x * (tdata.y >= minWeights);
		tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + s, z + 0));
		field[3] = tdata.x * (tdata.y >= minWeights);
		tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + 0, z + s));
		field[4] = tdata.x * (tdata.y >= minWeights);
		tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + 0, z + s));
		field[5] = tdata.x * (tdata.y >= minWeights);
		tdata = unpack_tsdf(read_tsdf_texture(tex, x + s, y + s, z + s));
		field[6] = tdata.x * (tdata.y >= minWeights);
		tdata = unpack_tsdf(read_tsdf_texture(tex, x + 0, y + s, z + s));
		field[7] = tdata.x * (tdata.y >= minWeights);
		// Build the 8-bit marching-cubes configuration; a cube with any
		// missing (zeroed) corner stays at index 0 -> no triangles.
		int cubeindex = 0;
		if (field[0] && field[1] && field[2] && field[3] && field[4]
			&& field[5] && field[6] && field[7])// exactly 0 means no value, thus should be ignored
		{
			cubeindex |= (int(field[0] < isoValue) << 0);
			cubeindex |= (int(field[1] < isoValue) << 1);// * 2;
			cubeindex |= (int(field[2] < isoValue) << 2);// * 4;
			cubeindex |= (int(field[3] < isoValue) << 3);// * 8;
			cubeindex |= (int(field[4] < isoValue) << 4);// * 16;
			cubeindex |= (int(field[5] < isoValue) << 5);// * 32;
			cubeindex |= (int(field[6] < isoValue) << 6);// * 64;
			cubeindex |= (int(field[7] < isoValue) << 7);// * 128;
		}
		int numVerts = g_numVertsTable[cubeindex];
		// NOTE(review): __ballot/__popc here are the pre-Volta mask-less
		// intrinsics; on Volta+ (independent thread scheduling) these should
		// become __ballot_sync(mask, ...) with an explicit mask — confirm the
		// target architectures this is compiled for.
		int total = __popc(__ballot(numVerts > 0));
		if (total)
		{
			if (lane_id == 0)
			{
				// lane 0 reserves 'total' output slots for the whole warp
				int old = atomicAdd(&global_count, total);
				warps_buffer[warp_id] = old;
			}
			// NOTE(review): lanes read warps_buffer right after lane 0's write
			// without a __syncwarp(); this relies on implicit warp lockstep,
			// which only holds pre-Volta.
			int old_global_voxels_count = warps_buffer[warp_id];
			int offs = Warp::binaryExclScan(__ballot(numVerts > 0));
			// drop voxels that exceed the preallocated output capacity
			if (old_global_voxels_count + offs < tile.max_num_activeVoxels && numVerts > 0)
			{
				voxels_indeces[old_global_voxels_count + offs] = ry*rx * tz + ty*rx + tx;
				vetexes_number[old_global_voxels_count + offs] = numVerts;
			}
		}
		/////////////////////////
		// prepare for future scans
		if (ftid == 0)
		{
			unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
			// atomicInc wraps back to 0 once the counter reaches total_blocks
			unsigned int value = atomicInc(&blocks_done, total_blocks);
			//last block
			if (value == total_blocks - 1)
			{
				// last block to finish publishes the clamped count and resets
				// the counters for the next launch
				output_count = min(tile.max_num_activeVoxels, global_count);
				blocks_done = 0;
				global_count = 0;
			}
		}
	} /* operator () */
};
// Kernel entry point: runs the OccupiedVoxels functor once per thread.
__global__ void getOccupiedVoxelsKernel(const OccupiedVoxels ov) { ov(); }
// Total of a device array given its exclusive prefix scan: the sum equals
// the last scan entry plus the last input entry. Pulls those two single
// elements back to the host; returns 0 for an empty array.
static unsigned int get_scanned_sum(unsigned int* d_ary, unsigned int* d_scan, int n)
{
	if (n == 0)
		return 0;
	unsigned int tailValue = 0, tailScan = 0;
	cudaSafeCall(cudaMemcpy((void *)&tailValue,
		(void *)(d_ary + n - 1),
		sizeof(unsigned int), cudaMemcpyDeviceToHost),
		"get_scanned_sum 1");
	cudaSafeCall(cudaMemcpy((void *)&tailScan,
		(void *)(d_scan + n - 1),
		sizeof(unsigned int), cudaMemcpyDeviceToHost),
		"get_scanned_sum 2");
	return tailScan + tailValue;
}
// Pass 1 of marching cubes: scans the tile with getOccupiedVoxelsKernel,
// producing the compacted occupied-voxel list (m_compVoxelArray), the
// per-voxel vertex counts (m_voxelVerts) and their exclusive scan
// (m_voxelVertsScan). On return tile.num_activeVoxels and tile.nverts are set.
void MarchingCubes::classifyVoxel(Tile& tile)
{
	// reset the device-side counters used by the OccupiedVoxels functor
	int zero_mem = 0;
	cudaSafeCall(cudaMemcpyToSymbol(output_count, &zero_mem, sizeof(int)),
	"MarchingCubes::classifyVoxel 1");
	cudaSafeCall(cudaMemcpyToSymbol(global_count, &zero_mem, sizeof(int)),
	"MarchingCubes::classifyVoxel 2");
	cudaSafeCall(cudaMemcpyToSymbol(blocks_done, &zero_mem, sizeof(int)),
	"MarchingCubes::classifyVoxel 3");
	OccupiedVoxels ov;
	ov.voxels_indeces = m_compVoxelArray.ptr();
	ov.vetexes_number = m_voxelVerts.ptr();
	ov.tex = m_volTex;
	ov.tile = tile;
	ov.isoValue = m_param.marching_cube_isoValue;
	ov.minWeights = m_param.marchingCube_min_valied_weight;
	dim3 block(OccupiedVoxels::CTA_SIZE_X, OccupiedVoxels::CTA_SIZE_Y,
	OccupiedVoxels::CTA_SIZE_Z);
	// one thread per voxel of the (level-of-detail downsampled) tile
	dim3 grid(divUp((tile.end.x - tile.begin.x) >> tile.level, block.x),
	divUp((tile.end.y - tile.begin.y) >> tile.level, block.y),
	divUp((tile.end.z - tile.begin.z) >> tile.level, block.z));
	getOccupiedVoxelsKernel << <grid, block >> >(ov);
	cudaSafeCall(cudaGetLastError(),
	"MarchingCubes::classifyVoxel getOccupiedVoxelsKernel");
	cudaSafeCall(cudaDeviceSynchronize(),
	"MarchingCubes::classifyVoxel getOccupiedVoxelsKernel");
	cudaSafeCall(cudaMemcpyFromSymbol(&tile.num_activeVoxels, output_count, sizeof(int)),
	"MarchingCubes::classifyVoxel 4");
	// hitting the cap means voxels were dropped by the kernel's capacity check
	if (tile.num_activeVoxels == tile.max_num_activeVoxels)
	{
	printf("warning: memory limit achieved in marching cube, you may enlarge \
marching_cube_max_activeVoxel_ratio in Param()\n");
	}
	// scan to get total number of vertices
	thrust_wrapper::exclusive_scan(m_voxelVerts.ptr(), m_voxelVertsScan.ptr(), tile.num_activeVoxels);
	tile.nverts = get_scanned_sum(m_voxelVerts.ptr(), m_voxelVertsScan.ptr(), tile.num_activeVoxels);
}
#pragma endregion
#pragma region --generate triangles
// Thread-block width used by generateTrianglesKernel; the shared vertlist /
// clorlist arrays are sized 12 * GEN_TRI_N_THREADS, so blockDim.x must match.
enum{
	GEN_TRI_N_THREADS = 32
};
// Component-wise linear interpolation: a + t * (b - a).
__device__ __forceinline__ float3 lerp(float3 a, float3 b, float t)
{
	const float3 delta = b - a;
	return a + t*delta;
}
// Component-wise linear interpolation: a + t * (b - a).
__device__ __forceinline__ float4 lerp(float4 a, float4 b, float t)
{
	const float4 delta = b - a;
	return a + t*delta;
}
// compute interpolated vertex along an edge
__device__ __forceinline__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1)
{
float t = (isolevel - f0) / (f1 - f0);
return dfusion::lerp(p0, p1, t);
}
// Color interpolated at the iso-crossing of edge p0 -> p1, scaled by
// COLOR_FUSION_BRIGHTNESS to brighten the fused color.
__device__ __forceinline__ float4 colorInterp(float isolevel, float4 p0, float4 p1, float f0, float f1)
{
	const float t = (isolevel - f0) / (f1 - f0);
	const float4 mixed = dfusion::lerp(p0, p1, t);
	return mixed * COLOR_FUSION_BRIGHTNESS; // make it brighter
}
// calculate triangle normal
__device__ __forceinline__ float3 calcNormal(float3 *v0, float3 *v1, float3 *v2)
{
float3 edge0 = *v1 - *v0;
float3 edge1 = *v2 - *v0;
// note - it's faster to perform normalization in vertex shader rather than here
return normalize(cross(edge0, edge1));
}
// version that calculates flat surface normal for each triangle
__global__ void generateTrianglesKernel(GpuMesh::PointType *pos, GpuMesh::PointType *norm,
cudaTextureObject_t tex, MarchingCubes::Tile tile,
unsigned int *compactedVoxelArray, unsigned int *numVertsScanned, float isoValue, float minWeights
#ifdef ENABLE_COLOR_FUSION
,GpuMesh::PointType* color
#endif
)
{
unsigned int blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
unsigned int tid = __mul24(blockId, blockDim.x<<3) + threadIdx.x;
const int rx = ((tile.end.x - tile.begin.x) >> tile.level);
const int ry = ((tile.end.y - tile.begin.y) >> tile.level);
const int s = (1 << tile.level);
const float svsz = tile.voxelSize*s;
const int rxry = rx*ry;
for (int block_iter = 0; block_iter < 8; block_iter++, tid += blockDim.x)
{
// cannot return due to __syncthreads()
if (tid < tile.num_activeVoxels)
{
unsigned int voxelId = compactedVoxelArray[tid];
// compute position in 3d grid
uint3 gridPos;
gridPos.z = voxelId / rxry;
gridPos.y = (voxelId - gridPos.z*rxry) / rx;
gridPos.x = voxelId % rx;
gridPos.x = tile.begin.x + (gridPos.x << tile.level);
gridPos.y = tile.begin.y + (gridPos.y << tile.level);
gridPos.z = tile.begin.z + (gridPos.z << tile.level);
// calculate cell vertex positions
float3 v[8];
float field[8];
v[0] = make_float3(tile.origion.x + gridPos.x * tile.voxelSize,
tile.origion.y + gridPos.y * tile.voxelSize,
tile.origion.z + gridPos.z * tile.voxelSize);
v[1] = make_float3(v[0].x + svsz, v[0].y, v[0].z);
v[2] = make_float3(v[0].x + svsz, v[0].y + svsz, v[0].z);
v[3] = make_float3(v[0].x, v[0].y + svsz, v[0].z);
v[4] = make_float3(v[0].x, v[0].y, v[0].z + svsz);
v[5] = make_float3(v[0].x + svsz, v[0].y, v[0].z + svsz);
v[6] = make_float3(v[0].x + svsz, v[0].y + svsz, v[0].z + svsz);
v[7] = make_float3(v[0].x, v[0].y + svsz, v[0].z + svsz);
#ifdef ENABLE_COLOR_FUSION
float4 c[8];
float2 tdata;
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + 0), tdata, c[0]);
field[0] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + 0), tdata, c[1]);
field[1] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + 0), tdata, c[2]);
field[2] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + 0), tdata, c[3]);
field[3] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + s), tdata, c[4]);
field[4] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + s), tdata, c[5]);
field[5] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + s), tdata, c[6]);
field[6] = tdata.x * (tdata.y >= minWeights);
unpack_tsdf_vw_rgba(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + s), tdata, c[7]);
field[7] = tdata.x * (tdata.y >= minWeights);
#else
float2 tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + 0));
field[0] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + 0));
field[1] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + 0));
field[2] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + 0));
field[3] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + 0, gridPos.z + s));
field[4] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + 0, gridPos.z + s));
field[5] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + s, gridPos.y + s, gridPos.z + s));
field[6] = tdata.x * (tdata.y >= minWeights);
tdata = unpack_tsdf(read_tsdf_texture(tex, gridPos.x + 0, gridPos.y + s, gridPos.z + s));
field[7] = tdata.x * (tdata.y >= minWeights);
#endif
// recalculate flag, faster than store in global memory
int cubeindex = 0;
if (field[0] && field[1] && field[2] && field[3] && field[4]
&& field[5] && field[6] && field[7])// exactly 0 means no value, thus should be ignored
{
cubeindex |= (int(field[0] < isoValue) << 0);
cubeindex |= (int(field[1] < isoValue) << 1);// * 2;
cubeindex |= (int(field[2] < isoValue) << 2);// * 4;
cubeindex |= (int(field[3] < isoValue) << 3);// * 8;
cubeindex |= (int(field[4] < isoValue) << 4);// * 16;
cubeindex |= (int(field[5] < isoValue) << 5);// * 32;
cubeindex |= (int(field[6] < isoValue) << 6);// * 64;
cubeindex |= (int(field[7] < isoValue) << 7);// * 128;
}
// find the vertices where the surface intersects the cube
// use shared memory to avoid using local
__shared__ float3 vertlist[12 * GEN_TRI_N_THREADS];
vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[GEN_TRI_N_THREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[(GEN_TRI_N_THREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[(GEN_TRI_N_THREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[(GEN_TRI_N_THREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[(GEN_TRI_N_THREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[(GEN_TRI_N_THREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[(GEN_TRI_N_THREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[(GEN_TRI_N_THREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[(GEN_TRI_N_THREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[(GEN_TRI_N_THREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[(GEN_TRI_N_THREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
#ifdef ENABLE_COLOR_FUSION
__shared__ float4 clorlist[12 * GEN_TRI_N_THREADS];
clorlist[threadIdx.x] = colorInterp(isoValue, c[0], c[1], field[0], field[1]);
clorlist[GEN_TRI_N_THREADS + threadIdx.x] = colorInterp(isoValue, c[1], c[2], field[1], field[2]);
clorlist[(GEN_TRI_N_THREADS * 2) + threadIdx.x] = colorInterp(isoValue, c[2], c[3], field[2], field[3]);
clorlist[(GEN_TRI_N_THREADS * 3) + threadIdx.x] = colorInterp(isoValue, c[3], c[0], field[3], field[0]);
clorlist[(GEN_TRI_N_THREADS * 4) + threadIdx.x] = colorInterp(isoValue, c[4], c[5], field[4], field[5]);
clorlist[(GEN_TRI_N_THREADS * 5) + threadIdx.x] = colorInterp(isoValue, c[5], c[6], field[5], field[6]);
clorlist[(GEN_TRI_N_THREADS * 6) + threadIdx.x] = colorInterp(isoValue, c[6], c[7], field[6], field[7]);
clorlist[(GEN_TRI_N_THREADS * 7) + threadIdx.x] = colorInterp(isoValue, c[7], c[4], field[7], field[4]);
clorlist[(GEN_TRI_N_THREADS * 8) + threadIdx.x] = colorInterp(isoValue, c[0], c[4], field[0], field[4]);
clorlist[(GEN_TRI_N_THREADS * 9) + threadIdx.x] = colorInterp(isoValue, c[1], c[5], field[1], field[5]);
clorlist[(GEN_TRI_N_THREADS * 10) + threadIdx.x] = colorInterp(isoValue, c[2], c[6], field[2], field[6]);
clorlist[(GEN_TRI_N_THREADS * 11) + threadIdx.x] = colorInterp(isoValue, c[3], c[7], field[3], field[7]);
#endif
__syncthreads();
// output triangle vertices
unsigned int numVerts = g_numVertsTable[cubeindex];
for (int i = 0; i < numVerts; i += 3)
{
unsigned int index = numVertsScanned[tid] + i;
float3 *v[3];
#ifdef ENABLE_COLOR_FUSION
float4 *c[3];
#endif
for (int k = 0; k < 3; k++)
{
unsigned int edge = g_triTable[cubeindex][i + k];
v[2-k] = &vertlist[(edge*GEN_TRI_N_THREADS) + threadIdx.x];
#ifdef ENABLE_COLOR_FUSION
c[2-k] = &clorlist[(edge*GEN_TRI_N_THREADS) + threadIdx.x];
#endif
}
// calculate triangle surface normal
float3 n = calcNormal(v[0], v[1], v[2]);
if (index < tile.nverts - 2)
{
pos[index] = GpuMesh::to_point(*v[0]);
norm[index] = GpuMesh::to_point(n);
pos[index + 1] = GpuMesh::to_point(*v[1]);
norm[index + 1] = GpuMesh::to_point(n);
pos[index + 2] = GpuMesh::to_point(*v[2]);
norm[index + 2] = GpuMesh::to_point(n);
#ifdef ENABLE_COLOR_FUSION
color[index] = *c[0];
color[index + 1] = *c[1];
color[index + 2] = *c[2];
#endif
}
}// end for i
}// end if tid < activeVoxels
}// end for block_iter
}
// Pass 2 of marching cubes: allocates the output mesh (tile.nverts vertices)
// and fills its vertices/normals (and colors, if enabled) by running
// generateTrianglesKernel over the compacted active-voxel list from classifyVoxel.
void MarchingCubes::generateTriangles(const Tile& tile, GpuMesh& result)
{
	result.create(tile.nverts);
	if (tile.nverts == 0)
		return;
	dim3 block(GEN_TRI_N_THREADS);
	// each thread processes 8 voxels (the kernel's block_iter loop), hence <<3
	dim3 grid(divUp(tile.num_activeVoxels, block.x<<3));
	result.lockVertsNormals();
	generateTrianglesKernel << <grid, block >> >(
		result.verts(), result.normals(),
		m_volTex, tile,
		m_compVoxelArray, m_voxelVertsScan,
		m_param.marching_cube_isoValue,
		m_param.marchingCube_min_valied_weight
#ifdef ENABLE_COLOR_FUSION
		,result.colors()
#endif
		);
	cudaSafeCall(cudaGetLastError(), "generateTriangles");
	result.unlockVertsNormals();
}
#pragma endregion
} |
23d4073252fece135694bcc27f83d4066c45575e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
author: cltian
date: 2018/4/11
refs:
http://answers.opencv.org/question/89050/passing-an-array-of-cvgpumat-to-a-cuda-kernel/
http://answers.opencv.org/question/8466/how-access-gpumat-in-a-kernel/
http://answers.opencv.org/question/26059/using-gpu-module-with-own-code/
https://stackoverflow.com/questions/6965465/how-to-convert-gpumat-to-cvmat-in-opencv
https://hk.saowen.com/a/43287d4469fb06d8e398c2b78572e8165133d687cac9082916ee12c69b44131d
*/
#include "test.h"
#include "helper_math.h"
using namespace cv;
using namespace cv::gpu;
// Accumulates the pyramid-weighted matching cost for one (d, y, x) cell:
// walks lvl levels, halving coordinates each level, and sums sWgt[s] * cost.
// Launch maps threads as (x: disparity d, y: row, z: column); cA1/cA2/cA3 are
// the disparity count, height and width (see the caller_test launch below).
// NOTE(review): 'm' in the output index below is not declared anywhere in this
// kernel — this does not compile as written. Judging from the commented-out
// newCosts line, it was a host-side loop index and must be passed in as a
// kernel parameter.
__global__ void solveAllKernel(PtrStepSz<double>* pdSrc, double* sWgt, int cA1, int cA2, int cA3, int lvl) {
	int d = threadIdx.x + blockDim.x * blockIdx.x;
	int y = threadIdx.y + blockDim.y * blockIdx.y;
	int x = threadIdx.z + blockDim.z * blockIdx.z;
	if (d<cA1 && y<cA2 && x<cA3){
		int curY = y;
		int curX = x;
		int curD = d;
		double sum = 0;
		for( int s = 0; s < lvl; s ++ ) {
			//int idx = curY*imgW+curX;
			double curCost = pdSrc[s*cA1+curD].ptr(curY)[curX];
			//double curCost = smPyr[ s ]->costVol[ curD ].at<double>( curY, curX );
			sum += sWgt[ s ] * curCost;
			// move to the next-coarser pyramid level
			curY = curY / 2;
			curX = curX / 2;
			curD = ( curD + 1 ) / 2;
		}
		//int idxAss = assY*imgW+assX;
		pdSrc[m*cA1+d].ptr(y)[x] = sum;
		//newCosts[ m ][ assD ].at<double>( assY, assX ) = sum;
	}
}
// Weighted cross-scale accumulation of the pyramid cost volumes on the GPU:
// uploads every level's rcostVol slices, runs solveAllKernel once over the
// finest level's dimensions, then downloads the results into costVol.
// NOTE(review): a second caller_test with the identical signature follows in
// this file; one of the two must be renamed or removed to link. The 'lvl'
// parameter is unused (PY_LVL is used instead) — confirm which is intended.
void caller_test(SSCA**& smPyr, double* invWgt, int lvl){
	const int constVar1 = PY_LVL*smPyr[ 0 ]->maxDis;
	const int constVar2 = smPyr[ 0 ]->maxDis;
	// Fix: 'mats' was an uninitialized pointer that was immediately
	// dereferenced below; allocate one GpuMat per uploaded slice.
	GpuMat* mats = new GpuMat[constVar1];
	PtrStepSz<double>* phSrc = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* phDst = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* pdSrc;
	// Fix: this was 'new double[PY_LVL*PY_LVL]' and then overwritten by
	// hipMalloc, leaking the host allocation — a plain null init suffices.
	double *dinvWgt = NULL;
	int i = 0;
	for(int s=0; s<PY_LVL; s++)
		for(int d=0; d<constVar2; d++)
			mats[i++].upload(smPyr[s]->rcostVol[d]); // flat index: s*constVar2 + d
	for (int i=0; i<constVar1; i++)
		phSrc[i] = mats[i];
	cudaCheckError(hipMalloc(&dinvWgt, sizeof(double)*PY_LVL*PY_LVL));
	cudaCheckError(hipMalloc(&pdSrc, constVar1*sizeof(PtrStepSz<double>)));
	cudaCheckError(hipMemcpy(dinvWgt, invWgt, sizeof(double)*PY_LVL*PY_LVL, hipMemcpyHostToDevice));
	cudaCheckError(hipMemcpy(pdSrc, phSrc, constVar1*sizeof(PtrStepSz<double>),
		hipMemcpyHostToDevice));
	dim3 block(8, 8, 8);
	dim3 grid( (constVar2+block.x-1)/block.x, (hei+block.y-1)/block.y, (wid+block.z-1)/block.z);
	hipLaunchKernelGGL(( solveAllKernel), dim3(grid), dim3(block), 0, 0, pdSrc, dinvWgt, constVar2, hei, wid, PY_LVL);
	cudaCheckError(hipMemcpy(phDst, pdSrc, constVar1*sizeof(PtrStepSz<double>),
		hipMemcpyDeviceToHost));
	// NOTE(review): the kernel wrote in place through pdSrc, so the slices are
	// already updated on the device; verify the GpuMat <- PtrStepSz assignment
	// below compiles and is actually needed.
	for (int i=0; i<constVar1; i++)
		mats[i] = phDst[i];
	for( int s = 0; s < PY_LVL; s ++ ) {
		for( int d = 0; d < smPyr[ s ]->maxDis; d ++ ) {
			Mat tempMatrix;
			// Fix: slices were uploaded at flat index s*constVar2 + d but were
			// read back at s*PY_LVL + d; the two differ whenever maxDis != PY_LVL.
			mats[s*constVar2+d].download(tempMatrix);
			smPyr[ s ]->costVol[ d ] = tempMatrix.clone();
		}
	}
	cudaCheckError(hipFree(pdSrc));
	cudaCheckError(hipFree(dinvWgt));
	// Fix: release the host-side temporaries (previously leaked).
	delete[] phSrc;
	delete[] phDst;
	delete[] mats;
}
// Weighted cross-scale accumulation of the pyramid cost volumes on the GPU:
// uploads every level's costVol slices, runs solveAllKernel once over the
// finest level's dimensions, then downloads the results back into costVol.
// NOTE(review): this duplicates the caller_test signature defined above in
// this file; one of the two must be renamed or removed to link.
void caller_test(SSCA**& smPyr, double* invWgt, int lvl){
	const int constVar1 = PY_LVL*smPyr[ 0 ]->maxDis;
	const int constVar2 = smPyr[ 0 ]->maxDis;
	// Fix: 'mats' was an uninitialized pointer that was immediately
	// dereferenced below; allocate one GpuMat per uploaded slice.
	GpuMat* mats = new GpuMat[constVar1];
	PtrStepSz<double>* phSrc = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* phDst = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* pdSrc;
	// Fix: this was 'new double[PY_LVL*PY_LVL]' and then overwritten by
	// hipMalloc, leaking the host allocation — a plain null init suffices.
	double *dinvWgt = NULL;
	int i = 0;
	for(int s=0; s<PY_LVL; s++)
		for(int d=0; d<constVar2; d++)
			mats[i++].upload(smPyr[s]->costVol[d]); // flat index: s*constVar2 + d
	for (int i=0; i<constVar1; i++)
		phSrc[i] = mats[i];
	cudaCheckError(hipMalloc(&dinvWgt, sizeof(double)*PY_LVL*PY_LVL));
	cudaCheckError(hipMalloc(&pdSrc, constVar1*sizeof(PtrStepSz<double>)));
	cudaCheckError(hipMemcpy(dinvWgt, invWgt, sizeof(double)*PY_LVL*PY_LVL, hipMemcpyHostToDevice));
	cudaCheckError(hipMemcpy(pdSrc, phSrc, constVar1*sizeof(PtrStepSz<double>),
		hipMemcpyHostToDevice));
	dim3 block(8, 8, 8);
	dim3 grid( (constVar2+block.x-1)/block.x, (hei+block.y-1)/block.y, (wid+block.z-1)/block.z);
	hipLaunchKernelGGL(( solveAllKernel), dim3(grid), dim3(block), 0, 0, pdSrc, dinvWgt, constVar2, hei, wid, PY_LVL);
	cudaCheckError(hipMemcpy(phDst, pdSrc, constVar1*sizeof(PtrStepSz<double>),
		hipMemcpyDeviceToHost));
	// NOTE(review): the kernel wrote in place through pdSrc, so the slices are
	// already updated on the device; verify the GpuMat <- PtrStepSz assignment
	// below compiles and is actually needed.
	for (int i=0; i<constVar1; i++)
		mats[i] = phDst[i];
	for( int s = 0; s < PY_LVL; s ++ ) {
		for( int d = 0; d < smPyr[ s ]->maxDis; d ++ ) {
			Mat tempMatrix;
			// Fix: slices were uploaded at flat index s*constVar2 + d but were
			// read back at s*PY_LVL + d; the two differ whenever maxDis != PY_LVL.
			mats[s*constVar2+d].download(tempMatrix);
			smPyr[ s ]->costVol[ d ] = tempMatrix.clone();
		}
	}
	cudaCheckError(hipFree(pdSrc));
	cudaCheckError(hipFree(dinvWgt));
	// Fix: release the host-side temporaries (previously leaked).
	delete[] phSrc;
	delete[] phDst;
	delete[] mats;
}
| 23d4073252fece135694bcc27f83d4066c45575e.cu | /*
author: cltian
date: 2018/4/11
refs:
http://answers.opencv.org/question/89050/passing-an-array-of-cvgpumat-to-a-cuda-kernel/
http://answers.opencv.org/question/8466/how-access-gpumat-in-a-kernel/
http://answers.opencv.org/question/26059/using-gpu-module-with-own-code/
https://stackoverflow.com/questions/6965465/how-to-convert-gpumat-to-cvmat-in-opencv
https://hk.saowen.com/a/43287d4469fb06d8e398c2b78572e8165133d687cac9082916ee12c69b44131d
*/
#include "test.h"
#include "helper_math.h"
using namespace cv;
using namespace cv::gpu;
// Accumulates the pyramid-weighted matching cost for one (d, y, x) cell:
// walks lvl levels, halving coordinates each level, and sums sWgt[s] * cost.
// Launch maps threads as (x: disparity d, y: row, z: column); cA1/cA2/cA3 are
// the disparity count, height and width (see the caller_test launch below).
// NOTE(review): 'm' in the output index below is not declared anywhere in this
// kernel — this does not compile as written. Judging from the commented-out
// newCosts line, it was a host-side loop index and must be passed in as a
// kernel parameter.
__global__ void solveAllKernel(PtrStepSz<double>* pdSrc, double* sWgt, int cA1, int cA2, int cA3, int lvl) {
	int d = threadIdx.x + blockDim.x * blockIdx.x;
	int y = threadIdx.y + blockDim.y * blockIdx.y;
	int x = threadIdx.z + blockDim.z * blockIdx.z;
	if (d<cA1 && y<cA2 && x<cA3){
		int curY = y;
		int curX = x;
		int curD = d;
		double sum = 0;
		for( int s = 0; s < lvl; s ++ ) {
			//int idx = curY*imgW+curX;
			double curCost = pdSrc[s*cA1+curD].ptr(curY)[curX];
			//double curCost = smPyr[ s ]->costVol[ curD ].at<double>( curY, curX );
			sum += sWgt[ s ] * curCost;
			// move to the next-coarser pyramid level
			curY = curY / 2;
			curX = curX / 2;
			curD = ( curD + 1 ) / 2;
		}
		//int idxAss = assY*imgW+assX;
		pdSrc[m*cA1+d].ptr(y)[x] = sum;
		//newCosts[ m ][ assD ].at<double>( assY, assX ) = sum;
	}
}
// Weighted cross-scale accumulation of the pyramid cost volumes on the GPU:
// uploads every level's rcostVol slices, runs solveAllKernel once over the
// finest level's dimensions, then downloads the results into costVol.
// NOTE(review): a second caller_test with the identical signature follows in
// this file; one of the two must be renamed or removed to link. The 'lvl'
// parameter is unused (PY_LVL is used instead) — confirm which is intended.
void caller_test(SSCA**& smPyr, double* invWgt, int lvl){
	const int constVar1 = PY_LVL*smPyr[ 0 ]->maxDis;
	const int constVar2 = smPyr[ 0 ]->maxDis;
	// Fix: 'mats' was an uninitialized pointer that was immediately
	// dereferenced below; allocate one GpuMat per uploaded slice.
	GpuMat* mats = new GpuMat[constVar1];
	PtrStepSz<double>* phSrc = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* phDst = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* pdSrc;
	// Fix: this was 'new double[PY_LVL*PY_LVL]' and then overwritten by
	// cudaMalloc, leaking the host allocation — a plain null init suffices.
	double *dinvWgt = NULL;
	int i = 0;
	for(int s=0; s<PY_LVL; s++)
		for(int d=0; d<constVar2; d++)
			mats[i++].upload(smPyr[s]->rcostVol[d]); // flat index: s*constVar2 + d
	for (int i=0; i<constVar1; i++)
		phSrc[i] = mats[i];
	cudaCheckError(cudaMalloc(&dinvWgt, sizeof(double)*PY_LVL*PY_LVL));
	cudaCheckError(cudaMalloc(&pdSrc, constVar1*sizeof(PtrStepSz<double>)));
	cudaCheckError(cudaMemcpy(dinvWgt, invWgt, sizeof(double)*PY_LVL*PY_LVL, cudaMemcpyHostToDevice));
	cudaCheckError(cudaMemcpy(pdSrc, phSrc, constVar1*sizeof(PtrStepSz<double>),
		cudaMemcpyHostToDevice));
	dim3 block(8, 8, 8);
	dim3 grid( (constVar2+block.x-1)/block.x, (hei+block.y-1)/block.y, (wid+block.z-1)/block.z);
	solveAllKernel<<<grid, block>>>(pdSrc, dinvWgt, constVar2, hei, wid, PY_LVL);
	cudaCheckError(cudaMemcpy(phDst, pdSrc, constVar1*sizeof(PtrStepSz<double>),
		cudaMemcpyDeviceToHost));
	// NOTE(review): the kernel wrote in place through pdSrc, so the slices are
	// already updated on the device; verify the GpuMat <- PtrStepSz assignment
	// below compiles and is actually needed.
	for (int i=0; i<constVar1; i++)
		mats[i] = phDst[i];
	for( int s = 0; s < PY_LVL; s ++ ) {
		for( int d = 0; d < smPyr[ s ]->maxDis; d ++ ) {
			Mat tempMatrix;
			// Fix: slices were uploaded at flat index s*constVar2 + d but were
			// read back at s*PY_LVL + d; the two differ whenever maxDis != PY_LVL.
			mats[s*constVar2+d].download(tempMatrix);
			smPyr[ s ]->costVol[ d ] = tempMatrix.clone();
		}
	}
	cudaCheckError(cudaFree(pdSrc));
	cudaCheckError(cudaFree(dinvWgt));
	// Fix: release the host-side temporaries (previously leaked).
	delete[] phSrc;
	delete[] phDst;
	delete[] mats;
}
// Weighted cross-scale accumulation of the pyramid cost volumes on the GPU:
// uploads every level's costVol slices, runs solveAllKernel once over the
// finest level's dimensions, then downloads the results back into costVol.
// NOTE(review): this duplicates the caller_test signature defined above in
// this file; one of the two must be renamed or removed to link.
void caller_test(SSCA**& smPyr, double* invWgt, int lvl){
	const int constVar1 = PY_LVL*smPyr[ 0 ]->maxDis;
	const int constVar2 = smPyr[ 0 ]->maxDis;
	// Fix: 'mats' was an uninitialized pointer that was immediately
	// dereferenced below; allocate one GpuMat per uploaded slice.
	GpuMat* mats = new GpuMat[constVar1];
	PtrStepSz<double>* phSrc = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* phDst = new PtrStepSz<double>[constVar1];
	PtrStepSz<double>* pdSrc;
	// Fix: this was 'new double[PY_LVL*PY_LVL]' and then overwritten by
	// cudaMalloc, leaking the host allocation — a plain null init suffices.
	double *dinvWgt = NULL;
	int i = 0;
	for(int s=0; s<PY_LVL; s++)
		for(int d=0; d<constVar2; d++)
			mats[i++].upload(smPyr[s]->costVol[d]); // flat index: s*constVar2 + d
	for (int i=0; i<constVar1; i++)
		phSrc[i] = mats[i];
	cudaCheckError(cudaMalloc(&dinvWgt, sizeof(double)*PY_LVL*PY_LVL));
	cudaCheckError(cudaMalloc(&pdSrc, constVar1*sizeof(PtrStepSz<double>)));
	cudaCheckError(cudaMemcpy(dinvWgt, invWgt, sizeof(double)*PY_LVL*PY_LVL, cudaMemcpyHostToDevice));
	cudaCheckError(cudaMemcpy(pdSrc, phSrc, constVar1*sizeof(PtrStepSz<double>),
		cudaMemcpyHostToDevice));
	dim3 block(8, 8, 8);
	dim3 grid( (constVar2+block.x-1)/block.x, (hei+block.y-1)/block.y, (wid+block.z-1)/block.z);
	solveAllKernel<<<grid, block>>>(pdSrc, dinvWgt, constVar2, hei, wid, PY_LVL);
	cudaCheckError(cudaMemcpy(phDst, pdSrc, constVar1*sizeof(PtrStepSz<double>),
		cudaMemcpyDeviceToHost));
	// NOTE(review): the kernel wrote in place through pdSrc, so the slices are
	// already updated on the device; verify the GpuMat <- PtrStepSz assignment
	// below compiles and is actually needed.
	for (int i=0; i<constVar1; i++)
		mats[i] = phDst[i];
	for( int s = 0; s < PY_LVL; s ++ ) {
		for( int d = 0; d < smPyr[ s ]->maxDis; d ++ ) {
			Mat tempMatrix;
			// Fix: slices were uploaded at flat index s*constVar2 + d but were
			// read back at s*PY_LVL + d; the two differ whenever maxDis != PY_LVL.
			mats[s*constVar2+d].download(tempMatrix);
			smPyr[ s ]->costVol[ d ] = tempMatrix.clone();
		}
	}
	cudaCheckError(cudaFree(pdSrc));
	cudaCheckError(cudaFree(dinvWgt));
	// Fix: release the host-side temporaries (previously leaked).
	delete[] phSrc;
	delete[] phDst;
	delete[] mats;
}
|
host_functions.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_functions.h"
#include "cuda_common.h"
#include "helper_cuda.h"
// Host wrapper: clears startIndices to CELL_EMPTY, launches the
// ReorderDataAndFindCellStart kernel (sized by particle count, 256 threads
// per block, one shared int per thread plus one extra), then copies the
// reordered particle buffer back over the original.
void ReorderDataAndFindCellStart_Host(ZIndexGridCUDA& zgrid) {
	int gridBlocks = 0, blockThreads = 0;
	computeBlockSize(zgrid.numParticles, 256, gridBlocks, blockThreads);
	hipMemset(zgrid.startIndices, CELL_EMPTY, zgrid.numCells*sizeof(uint));
	const int smemBytes = sizeof(int)*(blockThreads+1);
	hipLaunchKernelGGL(( ReorderDataAndFindCellStart) , dim3(gridBlocks), dim3(blockThreads), smemBytes, 0, zgrid);
	hipDeviceSynchronize();
	getLastCudaError("Kernel execution failed: reorder data");
	hipMemcpy(zgrid.particles, zgrid.reorderBuffer, sizeof(Particle)*zgrid.numParticles, hipMemcpyDeviceToDevice);
}
// Host wrapper: launches the ComputeParticleHash kernel (grid sized from the
// particle count, 256 threads per block) and blocks until it completes.
void ComputeParticleHash_Host(ZIndexGridCUDA& zgrid) {
	int gridBlocks = 0, blockThreads = 0;
	computeBlockSize(zgrid.numParticles, 256, gridBlocks, blockThreads);
	hipLaunchKernelGGL(( ComputeParticleHash) , dim3(gridBlocks), dim3(blockThreads), 0, 0, zgrid);
	hipDeviceSynchronize();
	getLastCudaError("Kernel execution failed:compute particle hash");
}
// Host wrapper: launches the ComputeColorField kernel over all particles
// (grid sized from the particle count, 256 threads per block); parameters
// are forwarded to the kernel untouched.
void ComputeColorField_Host(
	ZIndexGridCUDA& zgrid,
	float spacing,
	float infectRadius,
	float normThres,
	int neighborThres,
	int* surfaceParticleMark) {
	int gridBlocks = 0, blockThreads = 0;
	computeBlockSize(zgrid.numParticles, 256, gridBlocks, blockThreads);
	hipLaunchKernelGGL(( ComputeColorField), dim3(gridBlocks), dim3(blockThreads), 0, 0, zgrid,
		spacing,
		infectRadius,
		normThres,
		neighborThres,
		surfaceParticleMark);
	hipDeviceSynchronize();
	getLastCudaError("Kernel execution failed:compute color field");
}
// Host wrapper: launches ComputeScalarValues, sized by the surface-grid
// vertex count (256 threads per block), then synchronizes and reports errors.
void ComputeScalarValues_Host(
	ZIndexGridCUDA& zgrid,
	SurfaceGridCUDA& sgrid,
	float particleSpacing,
	float infectRadius
	) {
	int gridBlocks = 0, blockThreads = 0;
	computeBlockSize(sgrid.numSurfaceVertices, 256, gridBlocks, blockThreads);
	hipLaunchKernelGGL(( ComputeScalarValues), dim3(gridBlocks), dim3(blockThreads), 0, 0,
		zgrid,
		sgrid,
		particleSpacing,
		infectRadius);
	hipDeviceSynchronize();
	getLastCudaError("Kernel execution failed:compute scalar values");
} | host_functions.cu |
#include "kernel_functions.h"
#include "cuda_common.h"
#include "helper_cuda.h"
// Host wrapper: clears startIndices to CELL_EMPTY, launches the
// ReorderDataAndFindCellStart kernel (sized by particle count, 256 threads
// per block, one shared int per thread plus one extra), then copies the
// reordered particle buffer back over the original.
void ReorderDataAndFindCellStart_Host(ZIndexGridCUDA& zgrid) {
	int numBlocks, numThreads;
	computeBlockSize(zgrid.numParticles, 256, numBlocks, numThreads);
	cudaMemset(zgrid.startIndices, CELL_EMPTY, zgrid.numCells*sizeof(uint));
	int sharedMemSize = sizeof(int)*(numThreads+1);
	ReorderDataAndFindCellStart <<< numBlocks, numThreads, sharedMemSize>>>(zgrid);
	// Fix: cudaThreadSynchronize() has been deprecated since CUDA 4.0;
	// cudaDeviceSynchronize() is the drop-in replacement.
	cudaDeviceSynchronize();
	getLastCudaError("Kernel execution failed: reorder data");
	cudaMemcpy(zgrid.particles, zgrid.reorderBuffer, sizeof(Particle)*zgrid.numParticles, cudaMemcpyDeviceToDevice);
}
// Host wrapper: launches the ComputeParticleHash kernel (grid sized from the
// particle count, 256 threads per block) and blocks until it completes.
void ComputeParticleHash_Host(ZIndexGridCUDA& zgrid) {
	int numBlocks, numThreads;
	computeBlockSize(zgrid.numParticles, 256, numBlocks, numThreads);
	ComputeParticleHash <<<numBlocks, numThreads>>> (zgrid);
	// Fix: deprecated cudaThreadSynchronize() -> cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	getLastCudaError("Kernel execution failed:compute particle hash");
}
// Host wrapper: launches the ComputeColorField kernel over all particles
// (grid sized from the particle count, 256 threads per block); parameters
// are forwarded to the kernel untouched.
void ComputeColorField_Host(
	ZIndexGridCUDA& zgrid,
	float spacing,
	float infectRadius,
	float normThres,
	int neighborThres,
	int* surfaceParticleMark) {
	int numBlocks, numThreads;
	computeBlockSize(zgrid.numParticles, 256, numBlocks, numThreads);
	ComputeColorField<<<numBlocks, numThreads>>>(zgrid,
		spacing,
		infectRadius,
		normThres,
		neighborThres,
		surfaceParticleMark);
	// Fix: deprecated cudaThreadSynchronize() -> cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	getLastCudaError("Kernel execution failed:compute color field");
}
// Host wrapper: launches ComputeScalarValues, sized by the surface-grid
// vertex count (256 threads per block), then synchronizes and reports errors.
void ComputeScalarValues_Host(
	ZIndexGridCUDA& zgrid,
	SurfaceGridCUDA& sgrid,
	float particleSpacing,
	float infectRadius
	) {
	int numBlocks, numThreads;
	computeBlockSize(sgrid.numSurfaceVertices, 256, numBlocks, numThreads);
	ComputeScalarValues<<<numBlocks, numThreads>>>(
		zgrid,
		sgrid,
		particleSpacing,
		infectRadius);
	// Fix: deprecated cudaThreadSynchronize() -> cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	getLastCudaError("Kernel execution failed:compute scalar values");
} |
a31d0997ea1781b510aaa3edad86c6c99ea20092.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 10
#define BLOCK_WIDTH 1000
// Dumps the first 'size' elements of 'array' to stdout, one "a[i] = v" per line.
void print_array(int *array, int size) {
	int idx = 0;
	while (idx < size) {
		printf("a[%d] = %d\n", idx, array[idx]);
		++idx;
	}
}
// Each thread atomically increments one slot of g, chosen as
// (global thread id) % ARRAY_SIZE. atomicAdd serializes the concurrent
// updates that the commented-out read-modify-write sequence would race on.
__global__ void increment_smart(int *g) {
	// Which thread is this?
	int i = blockIdx.x * blockDim.x + threadIdx.x ;
	// each thread to increment consecutive element, wrapping at ARRAY_SIZE
	i = i % ARRAY_SIZE ;
	// int temp = g[i];
	// g[i] = g[i] + 1;
	atomicAdd(&g[i],1);
}
// Demo driver: NUM_THREADS threads atomically increment a 10-element device
// array, then the result and the kernel time are printed.
// NOTE(review): none of the hip* calls are error-checked; the launch is
// asynchronous, so the timing is only valid if GpuTimer records device
// events and Elapsed() synchronizes on them — confirm against gputimer.h.
int main(int argc, char **argv) {
	GpuTimer timer;
	printf("%d total threads in %d blocks writing into %d array elements\n",
	NUM_THREADS, NUM_THREADS/BLOCK_WIDTH, ARRAY_SIZE);
	// declare and allocate host memory
	int h_array[ARRAY_SIZE];
	const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
	// declare, allocate and zero out GPU memory
	int *d_array;
	hipMalloc((void**) &d_array, ARRAY_BYTES);
	hipMemset((void*) d_array, 0, ARRAY_BYTES);
	// Benchmarking
	timer.Start();
	hipLaunchKernelGGL(( increment_smart), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
	timer.Stop();
	// Copy back the array (blocking copy: also synchronizes with the kernel)
	hipMemcpy(h_array,d_array,ARRAY_BYTES,hipMemcpyDeviceToHost);
	print_array(h_array, ARRAY_SIZE);
	printf("Time lapsed = %g ms\n",timer.Elapsed());
	hipFree(d_array);
	return 0;
}
| a31d0997ea1781b510aaa3edad86c6c99ea20092.cu | #include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 10
#define BLOCK_WIDTH 1000
// Dumps the first 'size' elements of 'array' to stdout, one "a[i] = v" per line.
void print_array(int *array, int size) {
	int idx = 0;
	while (idx < size) {
		printf("a[%d] = %d\n", idx, array[idx]);
		++idx;
	}
}
// Each thread atomically increments one slot of g, chosen as
// (global thread id) % ARRAY_SIZE. atomicAdd serializes the concurrent
// updates that the commented-out read-modify-write sequence would race on.
__global__ void increment_smart(int *g) {
	// Which thread is this?
	int i = blockIdx.x * blockDim.x + threadIdx.x ;
	// each thread to increment consecutive element, wrapping at ARRAY_SIZE
	i = i % ARRAY_SIZE ;
	// int temp = g[i];
	// g[i] = g[i] + 1;
	atomicAdd(&g[i],1);
}
// Driver: launches NUM_THREADS threads (NUM_THREADS/BLOCK_WIDTH blocks of
// BLOCK_WIDTH) that all atomically increment a small device array, then
// prints the per-slot counts and the measured kernel time.
int main(int argc, char **argv) {
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS/BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate and zero out GPU memory (cudaMemset is byte-wise; 0 is safe)
int *d_array;
cudaMalloc((void**) &d_array, ARRAY_BYTES);
cudaMemset((void*) d_array, 0, ARRAY_BYTES);
// Benchmarking.
// NOTE(review): there is no cudaDeviceSynchronize() between the launch and
// Stop(); presumably GpuTimer records CUDA events and Elapsed() synchronizes
// on them -- confirm against gputimer.h.
timer.Start();
increment_smart<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// Copy back the array (blocking cudaMemcpy, so the kernel has finished here)
cudaMemcpy(h_array,d_array,ARRAY_BYTES,cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time lapsed = %g ms\n",timer.Elapsed());
cudaFree(d_array);
return 0;
}
|
e951bacaab7ea5d2cadf2afa506cb15ec6f56ea3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
__global__ void myker(void)
{
}
int main()
{
hipLaunchKernelGGL(( myker), dim3(1),dim3(1), 0, 0, );
printf("Hello");
return 0;
} | e951bacaab7ea5d2cadf2afa506cb15ec6f56ea3.cu | #include<stdio.h>
#include<cuda.h>
// Empty placeholder kernel: does nothing; exists only so main() can exercise
// a kernel launch.
__global__ void myker(void)
{
}
// Fire the empty kernel once and print a greeting. No synchronization is
// needed: the kernel has no observable effects, and process exit flushes
// stdout.
int main()
{
	myker<<<1, 1>>>();
	printf("Hello");
	return 0;
}
c33dcf5ac8bba8e5f55825d5c5484242b8b95bf1.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
// Elementwise kernel for torch's `<<` (left shift) on integral tensors.
// The LHS is cast to its unsigned counterpart before shifting, so the shift
// is well-defined even for negative inputs (signed left-shift overflow is
// undefined behavior in C++).
void lshift_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() {
    gpu_kernel_with_scalars(iter,
      []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
      });
  });
}
// Elementwise kernel for torch's `>>` (right shift) on integral tensors.
// Uses the plain signed shift: implementation-defined pre-C++20, arithmetic
// (sign-extending) on mainstream compilers and guaranteed arithmetic since
// C++20.
void rshift_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() {
    gpu_kernel_with_scalars(iter,
      []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return a >> b;
      });
  });
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);
} // namespace at::native
| c33dcf5ac8bba8e5f55825d5c5484242b8b95bf1.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
// Elementwise kernel for torch's `<<` (left shift) on integral tensors.
// The LHS is cast to its unsigned counterpart before shifting, so the shift
// is well-defined even for negative inputs (signed left-shift overflow is
// undefined behavior in C++).
void lshift_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() {
    gpu_kernel_with_scalars(iter,
      []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
      });
  });
}
// Elementwise kernel for torch's `>>` (right shift) on integral tensors.
// Uses the plain signed shift: implementation-defined pre-C++20, arithmetic
// (sign-extending) on mainstream compilers and guaranteed arithmetic since
// C++20.
void rshift_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() {
    gpu_kernel_with_scalars(iter,
      []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
        return a >> b;
      });
  });
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);
} // namespace at::native
|
184c7fcabc55146a902aa44b810738084cf90bd7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeProjections.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const float *dataraw = NULL;
hipMalloc(&dataraw, XSIZE*YSIZE);
const int *iC = NULL;
hipMalloc(&iC, XSIZE*YSIZE);
const int *st = NULL;
hipMalloc(&st, XSIZE*YSIZE);
const int *id = NULL;
hipMalloc(&id, XSIZE*YSIZE);
const float *W = NULL;
hipMalloc(&W, XSIZE*YSIZE);
float *feat = NULL;
hipMalloc(&feat, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
computeProjections), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,dataraw,iC,st,id,W,feat);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
computeProjections), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,dataraw,iC,st,id,W,feat);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
computeProjections), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,dataraw,iC,st,id,W,feat);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 184c7fcabc55146a902aa44b810738084cf90bd7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeProjections.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
cudaMalloc(&Params, XSIZE*YSIZE);
const float *dataraw = NULL;
cudaMalloc(&dataraw, XSIZE*YSIZE);
const int *iC = NULL;
cudaMalloc(&iC, XSIZE*YSIZE);
const int *st = NULL;
cudaMalloc(&st, XSIZE*YSIZE);
const int *id = NULL;
cudaMalloc(&id, XSIZE*YSIZE);
const float *W = NULL;
cudaMalloc(&W, XSIZE*YSIZE);
float *feat = NULL;
cudaMalloc(&feat, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeProjections<<<gridBlock,threadBlock>>>(Params,dataraw,iC,st,id,W,feat);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeProjections<<<gridBlock,threadBlock>>>(Params,dataraw,iC,st,id,W,feat);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeProjections<<<gridBlock,threadBlock>>>(Params,dataraw,iC,st,id,W,feat);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
47768836fc5123ea1087827a8531c9931e1ca032.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<iostream>
__global__ void foo() {}
int main() {
hipLaunchKernelGGL(( foo), dim3(1),dim3(1), 0, 0, );
std::cout<<"The Result is "
<<hipGetErrorString(hipGetLastError())<<std::endl;
return 0;
}
| 47768836fc5123ea1087827a8531c9931e1ca032.cu | #include<stdio.h>
#include<iostream>
__global__ void foo() {} // no-op kernel: exists only so main() can exercise a launch
// Launch the no-op kernel, then report whether the launch itself succeeded.
int main() {
  foo<<<1,1>>>();
  // Kernel launches are asynchronous, but cudaGetLastError() still reports
  // launch-configuration failures immediately.
  const cudaError_t launchStatus = cudaGetLastError();
  std::cout << "The Result is " << cudaGetErrorString(launchStatus) << std::endl;
  return 0;
}
|
6907b9473fa42f23c0d37b7fa09d506b02b97e57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
#include <math.h> //for sqrt
const int maxThreadsPerBlock = 512; //to be on safe side. Though it could be queried
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
const int2 threadPos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (threadPos2d.x >= numCols || threadPos2d.y >= numRows)
return;
//1-D position
const int i = threadPos2d.y * numCols + threadPos2d.x;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(threadPos2d.y + filter_r, 0), (numRows - 1));
int image_c = min(max(threadPos2d.x + filter_c, 0), (numCols - 1));
float image_value = inputChannel[image_r * numCols + image_c];
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[i] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int2 threadPos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (threadPos2d.x >= numCols || threadPos2d.y >= numRows)
return;
//1-D position
const int i = threadPos2d.y * numCols + threadPos2d.x;
redChannel[i] = inputImageRGBA[i].x;
greenChannel[i] = inputImageRGBA[i].y;
blueChannel[i] = inputImageRGBA[i].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//Allocate memory for the filter on the GPU
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
// Full blur pipeline for one RGBA image, all on the device:
//   1. separateChannels: AoS uchar4 image -> three planar channel buffers
//   2. gaussian_blur:    convolve each plane with the uploaded filter
//   3. recombineChannels: planes -> uchar4 output image (alpha = 255)
// Preconditions: allocateMemoryAndCopyToGPU() has populated the file-scope
// d_red/d_green/d_blue and d_filter; all d_* pointers are device memory.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  // BUG FIX: the old aspect-ratio block-size computation rounded both
  // dimensions up (+1), so blockDimX * blockDimY could exceed
  // maxThreadsPerBlock (a square image gave 24 * 23 = 552 > 512), making
  // every launch fail with an invalid-configuration error. Use a fixed
  // block: 32 wide (full warps in x -> coalesced row accesses) by
  // maxThreadsPerBlock/32 tall, exactly maxThreadsPerBlock threads.
  const dim3 blockSize(32, maxThreadsPerBlock / 32, 1);
  // Ceiling division: cover the whole image without always launching a
  // redundant extra block when the size divides evenly (the kernels
  // bounds-check, so partial edge blocks are safe).
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y, 1);

  // Split the interleaved RGBA image into planar channels.
  hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
                      numRows,
                      numCols,
                      d_red,
                      d_green,
                      d_blue);
  // Synchronize, then surface any launch/execution error immediately.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // Blur each color plane independently with the same filter.
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // Reassemble the blurred planes into the RGBA output image.
  hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
                      d_greenBlurred,
                      d_blueBlurred,
                      d_outputImageRGBA,
                      numRows,
                      numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 6907b9473fa42f23c0d37b7fa09d506b02b97e57.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
#include <math.h> //for sqrt
const int maxThreadsPerBlock = 512; //to be on safe side. Though it could be queried
// Blur one 8-bit color plane with a square filter of width filterWidth.
// One thread per output pixel: the thread centers the filter window on its
// pixel, clamps out-of-image taps to the nearest edge pixel (edge
// replication), accumulates the weighted sum in float, and stores the
// result (implicitly truncated to unsigned char).
// NOTE(review): the -filterWidth/2 .. filterWidth/2 loop bounds assume an
// odd filterWidth -- confirm with the caller.
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // (col, row) of the pixel this thread owns.
  const int2 threadPos2d = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                                      blockIdx.y * blockDim.y + threadIdx.y);
  // Threads past the image edge (grid overshoot) do nothing.
  if (threadPos2d.x >= numCols || threadPos2d.y >= numRows)
    return;
  // Row-major 1-D offset of the pixel.
  const int i = threadPos2d.y * numCols + threadPos2d.x;
  float result = 0.f;
  // Walk the filter window centered on the pixel.
  for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
    for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
      // Clamp the tap position to the image boundary.
      int image_r = min(max(threadPos2d.y + filter_r, 0), (numRows - 1));
      int image_c = min(max(threadPos2d.x + filter_c, 0), (numCols - 1));
      float image_value = inputChannel[image_r * numCols + image_c];
      float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
      result += image_value * filter_value;
    }
  }
  // Implicit float -> unsigned char conversion truncates toward zero.
  outputChannel[i] = result;
}
// Split an interleaved RGBA image (array-of-structures) into three planar
// channel buffers (structure-of-arrays). One thread per pixel; threads that
// land outside the image exit early. The alpha channel is discarded.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard against grid overshoot at the image edges.
  if (col >= numCols || row >= numRows)
    return;
  const int pixel = row * numCols + col;
  // Single vectorized load of the interleaved pixel, then scatter the
  // components to their planar buffers.
  const uchar4 rgba = inputImageRGBA[pixel];
  redChannel[pixel]   = rgba.x;
  greenChannel[pixel] = rgba.y;
  blueChannel[pixel]  = rgba.z;
}
// Merge three planar color channels back into one interleaved RGBA image.
// One thread per pixel; alpha is fixed at 255 (fully opaque).
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard against grid overshoot at the image edges.
  if (col >= numCols || row >= numRows)
    return;
  const int pixel = row * numCols + col;
  // Gather the three planes and write one interleaved pixel.
  outputImageRGBA[pixel] = make_uchar4(redChannel[pixel],
                                       greenChannel[pixel],
                                       blueChannel[pixel],
                                       255);
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// One-time setup: allocate device buffers for the three 8-bit color planes
// (the file-scope globals d_red/d_green/d_blue) and the filterWidth x
// filterWidth float filter (d_filter), then copy the host filter weights to
// the device. cleanup() releases everything allocated here.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  // One byte per pixel for each color plane.
  checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
  // Square filter of float weights.
  checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
  // Blocking host -> device copy of the filter weights.
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
// Full blur pipeline for one RGBA image, all on the device:
//   1. separateChannels: AoS uchar4 image -> three planar channel buffers
//   2. gaussian_blur:    convolve each plane with the uploaded filter
//   3. recombineChannels: planes -> uchar4 output image (alpha = 255)
// Preconditions: allocateMemoryAndCopyToGPU() has populated the file-scope
// d_red/d_green/d_blue and d_filter; all d_* pointers are device memory.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  // BUG FIX: the old aspect-ratio block-size computation rounded both
  // dimensions up (+1), so blockDimX * blockDimY could exceed
  // maxThreadsPerBlock (a square image gave 24 * 23 = 552 > 512), making
  // every launch fail with cudaErrorInvalidConfiguration. Use a fixed
  // block: 32 wide (full warps in x -> coalesced row accesses) by
  // maxThreadsPerBlock/32 tall, exactly maxThreadsPerBlock threads.
  const dim3 blockSize(32, maxThreadsPerBlock / 32, 1);
  // Ceiling division: cover the whole image without always launching a
  // redundant extra block when the size divides evenly (the kernels
  // bounds-check, so partial edge blocks are safe).
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y, 1);

  // Split the interleaved RGBA image into planar channels.
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
                                            numRows,
                                            numCols,
                                            d_red,
                                            d_green,
                                            d_blue);
  // Synchronize, then surface any launch/execution error immediately.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Blur each color plane independently with the same filter.
  gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Reassemble the blurred planes into the RGBA output image.
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Release every device allocation made in allocateMemoryAndCopyToGPU().
// The buffers are independent, so the free order is arbitrary.
void cleanup() {
  checkCudaErrors(cudaFree(d_filter));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_red));
}
|
701b6f5ab6e2ccdbd2608da5322f9a415747c5d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
{
}
__global__ void alphaax(const int lengthC, const double alpha, const double *a, const double *b, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthC)
{
c[i] = alpha*a[0]*b[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!!
}
} | 701b6f5ab6e2ccdbd2608da5322f9a415747c5d3.cu | #include "includes.h"
extern "C"
{
}
// Elementwise scaled product: c[i] = alpha * a[0] * b[i] for i in
// [0, lengthC). a points at a single device-resident scalar; the grid may
// overshoot lengthC, so out-of-range threads bail out immediately.
__global__ void alphaax(const int lengthC, const double alpha, const double *a, const double *b, double *c)
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= lengthC)
		return;
	// Same left-to-right evaluation order as the original expression, so
	// floating-point results are bit-identical.
	c[idx] = alpha * a[0] * b[idx];
}
83df9a53f368ee7fe9c66755cbb3c65fa7c9c379.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "cudaErrorHandling.h"
texture<float, 3, hipReadModeElementType> tex_A;
texture<float, 3, hipReadModeElementType> tex_B;
__global__ void matMul(float *C, int M, int K, int N, int pitch)
{
__shared__ float sA_bf[2][8*64];
__shared__ float sB_bf[2][8*64];
float *A_pref, *A_now;
float *B_pref, *B_now;
int x = threadIdx.x;
int y = threadIdx.y;
int bx = blockIdx.x*64;
int by = blockIdx.y*64;
int batch_id = blockIdx.z;
int id = y*8+x;
int inv_id = ((id&28)<<1) + (id%4) + (id<32? 0:4); //id%32/4*8
int glbA_id = by + inv_id;
int glbB_id = bx + inv_id;
float a0[8], a1[8];
float b0[8], b1[8];
float c[8][8];
for (int i = 0; i < 8; ++i)
for (int j = 0; j < 8; j++)
c[i][j] = 0.0;
/*********************************************************************/
for (int i = 0; i < 8; ++i) { // first batch of shared store
sA_bf[0][i*64+id] = tex3D(tex_A, glbA_id, i, batch_id);
sB_bf[0][i*64+id] = tex3D(tex_B, glbB_id, i, batch_id);
}
A_pref = sA_bf[1];
B_pref = sB_bf[1];
A_now = sA_bf[0];
B_now = sB_bf[0];
int track_bf = 0;
/****************************** main loop ******************************/
for (int t = 8; t < K; t += 8) {
__syncthreads();
A_pref[id] = tex3D(tex_A, glbA_id, t, batch_id); // double buffered shared store
B_pref[id] = tex3D(tex_B, glbB_id, t, batch_id);
((float4*)a0)[0] = ((float4*)A_now)[y]; // first shared load of each step
((float4*)b0)[0] = ((float4*)B_now)[x];
((float4*)a0)[1] = ((float4*)A_now)[y+8];
((float4*)b0)[1] = ((float4*)B_now)[x+8];
#pragma unroll
for (int i = 1; i < 8; ++i) {
int base = i * 16;
A_pref[i*64+id] = tex3D(tex_A, glbA_id, t+i, batch_id); // double bufferd shared store
B_pref[i*64+id] = tex3D(tex_B, glbB_id, t+i, batch_id);
if (i&1) {
((float4*)a1)[0] = ((float4*)A_now)[base+y]; // double buffered shared load
((float4*)b1)[0] = ((float4*)B_now)[base+x];
((float4*)a1)[1] = ((float4*)A_now)[base+y+8];
((float4*)b1)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a0[ii] * b0[jj];
} else {
((float4*)a0)[0] = ((float4*)A_now)[base+y]; // double buffered shared load
((float4*)b0)[0] = ((float4*)B_now)[base+x];
((float4*)a0)[1] = ((float4*)A_now)[base+y+8];
((float4*)b0)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a1[ii] * b1[jj];
}
}
for (int i = 0; i < 8; ++i) { // remained computation of each step
for (int j = 0; j < 8; ++j) {
c[i][j] += a1[i] * b1[j];
}
}
A_pref = sA_bf[track_bf]; // shared double buffer pointer exchange
B_pref = sB_bf[track_bf];
A_now = sA_bf[1-track_bf];
B_now = sB_bf[1-track_bf];
track_bf = 1 ^ track_bf; // flip between 0 & 1
}
__syncthreads(); // need sync to ensure the last shared store complete
/************************************ remained step *******************************************/
((float4*)a0)[0] = ((float4*)A_now)[y];
((float4*)b0)[0] = ((float4*)B_now)[x];
((float4*)a0)[1] = ((float4*)A_now)[y+8];
((float4*)b0)[1] = ((float4*)B_now)[x+8];
#pragma unroll
for (int i = 1; i < 8; ++i) {
int base = i * 16;
if (i&1) {
((float4*)a1)[0] = ((float4*)A_now)[base+y];
((float4*)b1)[0] = ((float4*)B_now)[base+x];
((float4*)a1)[1] = ((float4*)A_now)[base+y+8];
((float4*)b1)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a0[ii] * b0[jj];
} else {
((float4*)a0)[0] = ((float4*)A_now)[base+y];
((float4*)b0)[0] = ((float4*)B_now)[base+x];
((float4*)a0)[1] = ((float4*)A_now)[base+y+8];
((float4*)b0)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a1[ii] * b1[jj];
}
}
for (int i = 0; i < 8; ++i) {
for (int j = 0; j < 8; ++j) {
c[i][j] += a1[i] * b1[j];
}
}
/********************************** wirte back *****************************************/
__syncthreads();
/*
baseSh: base offset for shared memory load
warp 0 start from 0+id_inwarp
warp 1 start from 64+id_inwarp
row: row number for global write
warp 0: 0 for first 16 threads; 8 for second 16 threads;
warp 1: 32 for first 16 threads; 40 for second 16 threads;
*/
C += batch_id * pitch * M;
int baseSh = (id<32? 0:64) + (id&31);
int row = by + ((id&16)>>1) + (id<32? 0:32);
for (int i = 0; i < 8; ++i) {
int rowi = row+i;
((float4*)sA_bf[0])[id*2] = ((float4*)(c[i]))[0];
((float4*)sA_bf[0])[id*2+1] = ((float4*)(c[i]))[1];
if (bx + id%16*4 < pitch) { // bound condition in x direction
if (rowi < M) // bound condition in y direction
((float4*)&C[(rowi )*pitch+bx])[id%16] = ((float4*)sA_bf[0])[baseSh]; // row 0 and 8 | 32 and 40
if (rowi+16 < M) //bound condition in y direction
((float4*)&C[(rowi+16)*pitch+bx])[id%16] = ((float4*)sA_bf[0])[baseSh+32]; // row 16 and 24 | 48 and 56
}
}
}
int main(int argc, char *argv[])
{
if (argc != 5) {
printf("usage: ./xxx m n k batch\n");
return -1;
}
int m = atoi(argv[1]);
int n = atoi(argv[2]);
int k = atoi(argv[3]);
int batch = atoi(argv[4]);
int pitch = ((n-1)/4+1)*4;
float *A = (float*)malloc(m*k*batch*sizeof(float));
float *B = (float*)malloc(k*n*batch*sizeof(float));
float *C = (float*)malloc(m*n*batch*sizeof(float));
if (A == NULL || B == NULL || C == NULL) {
printf("allocate host error!\n");
return 1;
}
for (int i = 0; i < m*k*batch; ++i) {
A[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
}
for (int i = 0; i < k*n*batch; ++i) {
B[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
}
float *dev_C = NULL;
err_handling( hipMalloc((void**)&dev_C, m*pitch*batch*sizeof(float)) );
hipChannelFormatDesc ADesc = hipCreateChannelDesc<float>();
hipChannelFormatDesc BDesc = hipCreateChannelDesc<float>();
hipArray *A_array, *B_array;
hipExtent extentA, extentB;
extentA = make_hipExtent(m, k, batch);
extentB = make_hipExtent(n, k, batch);
err_handling( hipMalloc3DArray(&A_array, &ADesc, extentA) );
err_handling( hipMalloc3DArray(&B_array, &BDesc, extentB) );
hipMemcpy3DParms copyParamsA = {0};
copyParamsA.srcPtr = make_hipPitchedPtr((void*)A, m*sizeof(float), m, k);
copyParamsA.dstArray = A_array;
copyParamsA.extent = extentA;
copyParamsA.kind = hipMemcpyHostToDevice;
err_handling( hipMemcpy3D(©ParamsA) );
hipMemcpy3DParms copyParamsB = {0};
copyParamsB.srcPtr = make_hipPitchedPtr((void*)B, n*sizeof(float), n, k);
copyParamsB.dstArray = B_array;
copyParamsB.extent = extentB;
copyParamsB.kind = hipMemcpyHostToDevice;
err_handling( hipMemcpy3D(©ParamsB) );
err_handling( hipBindTextureToArray(tex_A, A_array) );
err_handling( hipBindTextureToArray(tex_B, B_array) );
tex_A.addressMode[0] = hipAddressModeBorder;
tex_A.addressMode[1] = hipAddressModeBorder;
tex_B.addressMode[0] = hipAddressModeBorder;
tex_B.addressMode[1] = hipAddressModeBorder;
dim3 dimGrid((n-1)/64+1, (m-1)/64+1, batch);
dim3 dimBlock(8, 8, 1);
hipEvent_t start, stop;
err_handling( hipEventCreate(&start) );
err_handling( hipEventCreate(&stop) );
err_handling( hipEventRecord(start, 0) );
hipLaunchKernelGGL(( matMul), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_C, m, k, n, pitch);
err_handling( hipEventRecord(stop, 0) );
err_handling( hipEventSynchronize(start) );
err_handling( hipEventSynchronize(stop) );
float time_elapsed = 0;
err_handling( hipEventElapsedTime(&time_elapsed, start, stop) );
printf("time %fms\n", time_elapsed);
hipExtent extentC;
extentC = make_hipExtent(n*sizeof(float), m, batch);
hipMemcpy3DParms copyParamsC = {0};
copyParamsC.srcPtr = make_hipPitchedPtr((void*)dev_C, pitch*sizeof(float), n, m);
copyParamsC.dstPtr = make_hipPitchedPtr((void*)C, n*sizeof(float), n, m);
copyParamsC.extent = extentC;
copyParamsC.kind = hipMemcpyDeviceToHost;
err_handling( hipMemcpy3D(©ParamsC) );
FILE *fp = fopen("gpu.out", "w");
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
fprintf(fp, "%f\n", C[b*n*m+i*n+j]);
}
}
}
fclose(fp);
err_handling( hipUnbindTexture(tex_A) );
err_handling( hipUnbindTexture(tex_B) );
err_handling( hipFreeArray(A_array) );
err_handling( hipFreeArray(B_array) );
err_handling( hipFree(dev_C) );
err_handling( hipDeviceReset() );
return 0;
}
| 83df9a53f368ee7fe9c66755cbb3c65fa7c9c379.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include "cudaErrorHandling.h"
texture<float, 3, cudaReadModeElementType> tex_A;
texture<float, 3, cudaReadModeElementType> tex_B;
__global__ void matMul(float *C, int M, int K, int N, int pitch)
{
__shared__ float sA_bf[2][8*64];
__shared__ float sB_bf[2][8*64];
float *A_pref, *A_now;
float *B_pref, *B_now;
int x = threadIdx.x;
int y = threadIdx.y;
int bx = blockIdx.x*64;
int by = blockIdx.y*64;
int batch_id = blockIdx.z;
int id = y*8+x;
int inv_id = ((id&28)<<1) + (id%4) + (id<32? 0:4); //id%32/4*8
int glbA_id = by + inv_id;
int glbB_id = bx + inv_id;
float a0[8], a1[8];
float b0[8], b1[8];
float c[8][8];
for (int i = 0; i < 8; ++i)
for (int j = 0; j < 8; j++)
c[i][j] = 0.0;
/*********************************************************************/
for (int i = 0; i < 8; ++i) { // first batch of shared store
sA_bf[0][i*64+id] = tex3D(tex_A, glbA_id, i, batch_id);
sB_bf[0][i*64+id] = tex3D(tex_B, glbB_id, i, batch_id);
}
A_pref = sA_bf[1];
B_pref = sB_bf[1];
A_now = sA_bf[0];
B_now = sB_bf[0];
int track_bf = 0;
/****************************** main loop ******************************/
for (int t = 8; t < K; t += 8) {
__syncthreads();
A_pref[id] = tex3D(tex_A, glbA_id, t, batch_id); // double buffered shared store
B_pref[id] = tex3D(tex_B, glbB_id, t, batch_id);
((float4*)a0)[0] = ((float4*)A_now)[y]; // first shared load of each step
((float4*)b0)[0] = ((float4*)B_now)[x];
((float4*)a0)[1] = ((float4*)A_now)[y+8];
((float4*)b0)[1] = ((float4*)B_now)[x+8];
#pragma unroll
for (int i = 1; i < 8; ++i) {
int base = i * 16;
A_pref[i*64+id] = tex3D(tex_A, glbA_id, t+i, batch_id); // double bufferd shared store
B_pref[i*64+id] = tex3D(tex_B, glbB_id, t+i, batch_id);
if (i&1) {
((float4*)a1)[0] = ((float4*)A_now)[base+y]; // double buffered shared load
((float4*)b1)[0] = ((float4*)B_now)[base+x];
((float4*)a1)[1] = ((float4*)A_now)[base+y+8];
((float4*)b1)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a0[ii] * b0[jj];
} else {
((float4*)a0)[0] = ((float4*)A_now)[base+y]; // double buffered shared load
((float4*)b0)[0] = ((float4*)B_now)[base+x];
((float4*)a0)[1] = ((float4*)A_now)[base+y+8];
((float4*)b0)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a1[ii] * b1[jj];
}
}
for (int i = 0; i < 8; ++i) { // remained computation of each step
for (int j = 0; j < 8; ++j) {
c[i][j] += a1[i] * b1[j];
}
}
A_pref = sA_bf[track_bf]; // shared double buffer pointer exchange
B_pref = sB_bf[track_bf];
A_now = sA_bf[1-track_bf];
B_now = sB_bf[1-track_bf];
track_bf = 1 ^ track_bf; // flip between 0 & 1
}
__syncthreads(); // need sync to ensure the last shared store complete
/************************************ remained step *******************************************/
((float4*)a0)[0] = ((float4*)A_now)[y];
((float4*)b0)[0] = ((float4*)B_now)[x];
((float4*)a0)[1] = ((float4*)A_now)[y+8];
((float4*)b0)[1] = ((float4*)B_now)[x+8];
#pragma unroll
for (int i = 1; i < 8; ++i) {
int base = i * 16;
if (i&1) {
((float4*)a1)[0] = ((float4*)A_now)[base+y];
((float4*)b1)[0] = ((float4*)B_now)[base+x];
((float4*)a1)[1] = ((float4*)A_now)[base+y+8];
((float4*)b1)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a0[ii] * b0[jj];
} else {
((float4*)a0)[0] = ((float4*)A_now)[base+y];
((float4*)b0)[0] = ((float4*)B_now)[base+x];
((float4*)a0)[1] = ((float4*)A_now)[base+y+8];
((float4*)b0)[1] = ((float4*)B_now)[base+x+8];
for (int ii = 0; ii < 8; ++ii)
for (int jj = 0; jj < 8; ++jj)
c[ii][jj] += a1[ii] * b1[jj];
}
}
for (int i = 0; i < 8; ++i) {
for (int j = 0; j < 8; ++j) {
c[i][j] += a1[i] * b1[j];
}
}
/********************************** wirte back *****************************************/
__syncthreads();
/*
baseSh: base offset for shared memory load
warp 0 start from 0+id_inwarp
warp 1 start from 64+id_inwarp
row: row number for global write
warp 0: 0 for first 16 threads; 8 for second 16 threads;
warp 1: 32 for first 16 threads; 40 for second 16 threads;
*/
C += batch_id * pitch * M;
int baseSh = (id<32? 0:64) + (id&31);
int row = by + ((id&16)>>1) + (id<32? 0:32);
for (int i = 0; i < 8; ++i) {
int rowi = row+i;
((float4*)sA_bf[0])[id*2] = ((float4*)(c[i]))[0];
((float4*)sA_bf[0])[id*2+1] = ((float4*)(c[i]))[1];
if (bx + id%16*4 < pitch) { // bound condition in x direction
if (rowi < M) // bound condition in y direction
((float4*)&C[(rowi )*pitch+bx])[id%16] = ((float4*)sA_bf[0])[baseSh]; // row 0 and 8 | 32 and 40
if (rowi+16 < M) //bound condition in y direction
((float4*)&C[(rowi+16)*pitch+bx])[id%16] = ((float4*)sA_bf[0])[baseSh+32]; // row 16 and 24 | 48 and 56
}
}
}
int main(int argc, char *argv[])
{
if (argc != 5) {
printf("usage: ./xxx m n k batch\n");
return -1;
}
int m = atoi(argv[1]);
int n = atoi(argv[2]);
int k = atoi(argv[3]);
int batch = atoi(argv[4]);
int pitch = ((n-1)/4+1)*4;
float *A = (float*)malloc(m*k*batch*sizeof(float));
float *B = (float*)malloc(k*n*batch*sizeof(float));
float *C = (float*)malloc(m*n*batch*sizeof(float));
if (A == NULL || B == NULL || C == NULL) {
printf("allocate host error!\n");
return 1;
}
for (int i = 0; i < m*k*batch; ++i) {
A[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
}
for (int i = 0; i < k*n*batch; ++i) {
B[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
}
float *dev_C = NULL;
err_handling( cudaMalloc((void**)&dev_C, m*pitch*batch*sizeof(float)) );
cudaChannelFormatDesc ADesc = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc BDesc = cudaCreateChannelDesc<float>();
cudaArray *A_array, *B_array;
cudaExtent extentA, extentB;
extentA = make_cudaExtent(m, k, batch);
extentB = make_cudaExtent(n, k, batch);
err_handling( cudaMalloc3DArray(&A_array, &ADesc, extentA) );
err_handling( cudaMalloc3DArray(&B_array, &BDesc, extentB) );
cudaMemcpy3DParms copyParamsA = {0};
copyParamsA.srcPtr = make_cudaPitchedPtr((void*)A, m*sizeof(float), m, k);
copyParamsA.dstArray = A_array;
copyParamsA.extent = extentA;
copyParamsA.kind = cudaMemcpyHostToDevice;
err_handling( cudaMemcpy3D(©ParamsA) );
cudaMemcpy3DParms copyParamsB = {0};
copyParamsB.srcPtr = make_cudaPitchedPtr((void*)B, n*sizeof(float), n, k);
copyParamsB.dstArray = B_array;
copyParamsB.extent = extentB;
copyParamsB.kind = cudaMemcpyHostToDevice;
err_handling( cudaMemcpy3D(©ParamsB) );
err_handling( cudaBindTextureToArray(tex_A, A_array) );
err_handling( cudaBindTextureToArray(tex_B, B_array) );
tex_A.addressMode[0] = cudaAddressModeBorder;
tex_A.addressMode[1] = cudaAddressModeBorder;
tex_B.addressMode[0] = cudaAddressModeBorder;
tex_B.addressMode[1] = cudaAddressModeBorder;
dim3 dimGrid((n-1)/64+1, (m-1)/64+1, batch);
dim3 dimBlock(8, 8, 1);
cudaEvent_t start, stop;
err_handling( cudaEventCreate(&start) );
err_handling( cudaEventCreate(&stop) );
err_handling( cudaEventRecord(start, 0) );
matMul<<<dimGrid, dimBlock>>>(dev_C, m, k, n, pitch);
err_handling( cudaEventRecord(stop, 0) );
err_handling( cudaEventSynchronize(start) );
err_handling( cudaEventSynchronize(stop) );
float time_elapsed = 0;
err_handling( cudaEventElapsedTime(&time_elapsed, start, stop) );
printf("time %fms\n", time_elapsed);
cudaExtent extentC;
extentC = make_cudaExtent(n*sizeof(float), m, batch);
cudaMemcpy3DParms copyParamsC = {0};
copyParamsC.srcPtr = make_cudaPitchedPtr((void*)dev_C, pitch*sizeof(float), n, m);
copyParamsC.dstPtr = make_cudaPitchedPtr((void*)C, n*sizeof(float), n, m);
copyParamsC.extent = extentC;
copyParamsC.kind = cudaMemcpyDeviceToHost;
err_handling( cudaMemcpy3D(©ParamsC) );
FILE *fp = fopen("gpu.out", "w");
for (int b = 0; b < batch; ++b) {
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
fprintf(fp, "%f\n", C[b*n*m+i*n+j]);
}
}
}
fclose(fp);
err_handling( cudaUnbindTexture(tex_A) );
err_handling( cudaUnbindTexture(tex_B) );
err_handling( cudaFreeArray(A_array) );
err_handling( cudaFreeArray(B_array) );
err_handling( cudaFree(dev_C) );
err_handling( cudaDeviceReset() );
return 0;
}
|
27214bd5b1856bda284c7dfb4dde86cf58ab29eb.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef RS2_USE_CUDA
#include "cuda-align.cuh"
#include "../../../include/librealsense2-framos/rsutil.h"
#include "../../cuda/rscuda_utils.cuh"
// CUDA headers
#include <hip/hip_runtime.h>
#ifdef _MSC_VER
// Add library dependencies if using VS
#pragma comment(lib, "cudart_static")
#endif
#define RS2_CUDA_THREADS_PER_BLOCK 32
using namespace librealsense;
using namespace rscuda;
template<int N> struct bytes { unsigned char b[N]; };
int calc_block_size(int pixel_count, int thread_count)
{
return ((pixel_count % thread_count) == 0) ? (pixel_count / thread_count) : (pixel_count / thread_count + 1);
}
__device__ void kernel_transfer_pixels(int2* mapped_pixels, const rs2_intrinsics* depth_intrin,
const rs2_intrinsics* other_intrin, const rs2_extrinsics* depth_to_other, float depth_val, int depth_x, int depth_y, int block_index)
{
float shift = block_index ? 0.5 : -0.5;
auto depth_size = depth_intrin->width * depth_intrin->height;
auto mapped_index = block_index * depth_size + (depth_y * depth_intrin->width + depth_x);
if (mapped_index >= depth_size * 2)
return;
// Skip over depth pixels with the value of zero, we have no depth data so we will not write anything into our aligned images
if (depth_val == 0)
{
mapped_pixels[mapped_index] = { -1, -1 };
return;
}
//// Map the top-left corner of the depth pixel onto the other image
float depth_pixel[2] = { depth_x + shift, depth_y + shift }, depth_point[3], other_point[3], other_pixel[2];
rscuda::rs2_deproject_pixel_to_point(depth_point, depth_intrin, depth_pixel, depth_val);
rscuda::rs2_transform_point_to_point(other_point, depth_to_other, depth_point);
rscuda::rs2_project_point_to_pixel(other_pixel, other_intrin, other_point);
mapped_pixels[mapped_index].x = static_cast<int>(other_pixel[0] + 0.5f);
mapped_pixels[mapped_index].y = static_cast<int>(other_pixel[1] + 0.5f);
}
__global__ void kernel_map_depth_to_other(int2* mapped_pixels, const uint16_t* depth_in, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin,
const rs2_extrinsics* depth_to_other, float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
float depth_val = depth_in[depth_pixel_index] * depth_scale;
kernel_transfer_pixels(mapped_pixels, depth_intrin, other_intrin, depth_to_other, depth_val, depth_x, depth_y, blockIdx.z);
}
template<int BPP>
__global__ void kernel_other_to_depth(unsigned char* aligned, const unsigned char* other, const int2* mapped_pixels, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
auto in_other = (const bytes<BPP> *)(other);
auto out_other = (bytes<BPP> *)(aligned);
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto other_pixel_index = y * other_intrin->width + x;
out_other[depth_pixel_index] = in_other[other_pixel_index];
}
}
}
__global__ void kernel_depth_to_other(uint16_t* aligned_out, const uint16_t* depth_in, const int2* mapped_pixels, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
unsigned int new_val = depth_in[depth_pixel_index];
unsigned int* arr = (unsigned int*)aligned_out;
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto other_pixel_index = y * other_intrin->width + x;
new_val = new_val << 16 | new_val;
atomicMin(&arr[other_pixel_index / 2], new_val);
}
}
}
__global__ void kernel_replace_to_zero(uint16_t* aligned_out, const rs2_intrinsics* other_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
auto other_pixel_index = y * other_intrin->width + x;
if (aligned_out[other_pixel_index] == 0xffff)
aligned_out[other_pixel_index] = 0;
}
void align_cuda_helper::align_other_to_depth(unsigned char* h_aligned_out, const uint16_t* h_depth_in,
float depth_scale, const rs2_intrinsics& h_depth_intrin, const rs2_extrinsics& h_depth_to_other,
const rs2_intrinsics& h_other_intrin, const unsigned char* h_other_in, rs2_format other_format, int other_bytes_per_pixel)
{
int depth_pixel_count = h_depth_intrin.width * h_depth_intrin.height;
int other_pixel_count = h_other_intrin.width * h_other_intrin.height;
int depth_size = depth_pixel_count * 2;
int other_size = other_pixel_count * other_bytes_per_pixel;
int aligned_pixel_count = depth_pixel_count;
int aligned_size = aligned_pixel_count * other_bytes_per_pixel;
// allocate and copy objects to cuda device memory
if (!_d_depth_intrinsics) _d_depth_intrinsics = make_device_copy(h_depth_intrin);
if (!_d_other_intrinsics) _d_other_intrinsics = make_device_copy(h_other_intrin);
if (!_d_depth_other_extrinsics) _d_depth_other_extrinsics = make_device_copy(h_depth_to_other);
if (!_d_depth_in) _d_depth_in = alloc_dev<uint16_t>(aligned_pixel_count);
hipMemcpy(_d_depth_in.get(), h_depth_in, depth_size, hipMemcpyHostToDevice);
if (!_d_other_in) _d_other_in = alloc_dev<unsigned char>(other_size);
hipMemcpy(_d_other_in.get(), h_other_in, other_size, hipMemcpyHostToDevice);
if (!_d_aligned_out)
_d_aligned_out = alloc_dev<unsigned char>(aligned_size);
hipMemset(_d_aligned_out.get(), 0, aligned_size);
if (!_d_pixel_map) _d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2);
// config threads
dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(calc_block_size(h_depth_intrin.width, threads.x), calc_block_size(h_depth_intrin.height, threads.y));
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
hipLaunchKernelGGL(( kernel_map_depth_to_other) , dim3(mapping_blocks),dim3(threads), 0, 0, _d_pixel_map.get(), _d_depth_in.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get(),
_d_depth_other_extrinsics.get(), depth_scale);
switch (other_bytes_per_pixel)
{
case 1:hipLaunchKernelGGL(( kernel_other_to_depth<1>) , dim3(depth_blocks),dim3(threads), 0, 0, _d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
case 2:hipLaunchKernelGGL(( kernel_other_to_depth<2>) , dim3(depth_blocks),dim3(threads), 0, 0, _d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
case 3:hipLaunchKernelGGL(( kernel_other_to_depth<3>) , dim3(depth_blocks),dim3(threads), 0, 0, _d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
case 4:hipLaunchKernelGGL(( kernel_other_to_depth<4>) , dim3(depth_blocks),dim3(threads), 0, 0, _d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
}
hipDeviceSynchronize();
hipMemcpy(h_aligned_out, _d_aligned_out.get(), aligned_size, hipMemcpyDeviceToHost);
}
void align_cuda_helper::align_depth_to_other(unsigned char* h_aligned_out, const uint16_t* h_depth_in,
float depth_scale, const rs2_intrinsics& h_depth_intrin, const rs2_extrinsics& h_depth_to_other,
const rs2_intrinsics& h_other_intrin)
{
int depth_pixel_count = h_depth_intrin.width * h_depth_intrin.height;
int other_pixel_count = h_other_intrin.width * h_other_intrin.height;
int aligned_pixel_count = other_pixel_count;
int depth_byte_size = depth_pixel_count * 2;
int aligned_byte_size = aligned_pixel_count * 2;
// allocate and copy objects to cuda device memory
if (!_d_depth_intrinsics) _d_depth_intrinsics = make_device_copy(h_depth_intrin);
if (!_d_other_intrinsics) _d_other_intrinsics = make_device_copy(h_other_intrin);
if (!_d_depth_other_extrinsics) _d_depth_other_extrinsics = make_device_copy(h_depth_to_other);
if (!_d_depth_in) _d_depth_in = alloc_dev<uint16_t>(depth_pixel_count);
hipMemcpy(_d_depth_in.get(), h_depth_in, depth_byte_size, hipMemcpyHostToDevice);
if (!_d_aligned_out) _d_aligned_out = alloc_dev<unsigned char>(aligned_byte_size);
hipMemset(_d_aligned_out.get(), 0xff, aligned_byte_size);
if (!_d_pixel_map) _d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2);
// config threads
dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(calc_block_size(h_depth_intrin.width, threads.x), calc_block_size(h_depth_intrin.height, threads.y));
dim3 other_blocks(calc_block_size(h_other_intrin.width, threads.x), calc_block_size(h_other_intrin.height, threads.y));
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
hipLaunchKernelGGL(( kernel_map_depth_to_other) , dim3(mapping_blocks),dim3(threads), 0, 0, _d_pixel_map.get(), _d_depth_in.get(), _d_depth_intrinsics.get(),
_d_other_intrinsics.get(), _d_depth_other_extrinsics.get(), depth_scale);
hipLaunchKernelGGL(( kernel_depth_to_other) , dim3(depth_blocks),dim3(threads), 0, 0, (uint16_t*)_d_aligned_out.get(), _d_depth_in.get(), _d_pixel_map.get(),
_d_depth_intrinsics.get(), _d_other_intrinsics.get());
hipLaunchKernelGGL(( kernel_replace_to_zero) , dim3(other_blocks), dim3(threads), 0, 0, (uint16_t*)_d_aligned_out.get(), _d_other_intrinsics.get());
hipDeviceSynchronize();
hipMemcpy(h_aligned_out, _d_aligned_out.get(), aligned_pixel_count * 2, hipMemcpyDeviceToHost);
}
#endif //RS2_USE_CUDA
| 27214bd5b1856bda284c7dfb4dde86cf58ab29eb.cu | #ifdef RS2_USE_CUDA
#include "cuda-align.cuh"
#include "../../../include/librealsense2-framos/rsutil.h"
#include "../../cuda/rscuda_utils.cuh"
// CUDA headers
#include <cuda_runtime.h>
#ifdef _MSC_VER
// Add library dependencies if using VS
#pragma comment(lib, "cudart_static")
#endif
#define RS2_CUDA_THREADS_PER_BLOCK 32
using namespace librealsense;
using namespace rscuda;
template<int N> struct bytes { unsigned char b[N]; };
int calc_block_size(int pixel_count, int thread_count)
{
return ((pixel_count % thread_count) == 0) ? (pixel_count / thread_count) : (pixel_count / thread_count + 1);
}
__device__ void kernel_transfer_pixels(int2* mapped_pixels, const rs2_intrinsics* depth_intrin,
const rs2_intrinsics* other_intrin, const rs2_extrinsics* depth_to_other, float depth_val, int depth_x, int depth_y, int block_index)
{
float shift = block_index ? 0.5 : -0.5;
auto depth_size = depth_intrin->width * depth_intrin->height;
auto mapped_index = block_index * depth_size + (depth_y * depth_intrin->width + depth_x);
if (mapped_index >= depth_size * 2)
return;
// Skip over depth pixels with the value of zero, we have no depth data so we will not write anything into our aligned images
if (depth_val == 0)
{
mapped_pixels[mapped_index] = { -1, -1 };
return;
}
//// Map the top-left corner of the depth pixel onto the other image
float depth_pixel[2] = { depth_x + shift, depth_y + shift }, depth_point[3], other_point[3], other_pixel[2];
rscuda::rs2_deproject_pixel_to_point(depth_point, depth_intrin, depth_pixel, depth_val);
rscuda::rs2_transform_point_to_point(other_point, depth_to_other, depth_point);
rscuda::rs2_project_point_to_pixel(other_pixel, other_intrin, other_point);
mapped_pixels[mapped_index].x = static_cast<int>(other_pixel[0] + 0.5f);
mapped_pixels[mapped_index].y = static_cast<int>(other_pixel[1] + 0.5f);
}
__global__ void kernel_map_depth_to_other(int2* mapped_pixels, const uint16_t* depth_in, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin,
const rs2_extrinsics* depth_to_other, float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
float depth_val = depth_in[depth_pixel_index] * depth_scale;
kernel_transfer_pixels(mapped_pixels, depth_intrin, other_intrin, depth_to_other, depth_val, depth_x, depth_y, blockIdx.z);
}
template<int BPP>
__global__ void kernel_other_to_depth(unsigned char* aligned, const unsigned char* other, const int2* mapped_pixels, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
auto in_other = (const bytes<BPP> *)(other);
auto out_other = (bytes<BPP> *)(aligned);
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto other_pixel_index = y * other_intrin->width + x;
out_other[depth_pixel_index] = in_other[other_pixel_index];
}
}
}
__global__ void kernel_depth_to_other(uint16_t* aligned_out, const uint16_t* depth_in, const int2* mapped_pixels, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
unsigned int new_val = depth_in[depth_pixel_index];
unsigned int* arr = (unsigned int*)aligned_out;
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto other_pixel_index = y * other_intrin->width + x;
new_val = new_val << 16 | new_val;
atomicMin(&arr[other_pixel_index / 2], new_val);
}
}
}
__global__ void kernel_replace_to_zero(uint16_t* aligned_out, const rs2_intrinsics* other_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
auto other_pixel_index = y * other_intrin->width + x;
if (aligned_out[other_pixel_index] == 0xffff)
aligned_out[other_pixel_index] = 0;
}
// Resamples the "other" stream (color/IR) onto the depth image geometry.
// Device buffers (_d_*) are allocated lazily on the first call and reused on
// later calls, so this helper assumes image dimensions stay constant between
// calls. NOTE(review): the other_format parameter is unused in this body;
// dispatch is by other_bytes_per_pixel only — confirm that is intended.
void align_cuda_helper::align_other_to_depth(unsigned char* h_aligned_out, const uint16_t* h_depth_in,
float depth_scale, const rs2_intrinsics& h_depth_intrin, const rs2_extrinsics& h_depth_to_other,
const rs2_intrinsics& h_other_intrin, const unsigned char* h_other_in, rs2_format other_format, int other_bytes_per_pixel)
{
// byte/pixel bookkeeping (depth is 16-bit, hence * 2)
int depth_pixel_count = h_depth_intrin.width * h_depth_intrin.height;
int other_pixel_count = h_other_intrin.width * h_other_intrin.height;
int depth_size = depth_pixel_count * 2;
int other_size = other_pixel_count * other_bytes_per_pixel;
int aligned_pixel_count = depth_pixel_count;
int aligned_size = aligned_pixel_count * other_bytes_per_pixel;
// allocate and copy objects to cuda device memory
if (!_d_depth_intrinsics) _d_depth_intrinsics = make_device_copy(h_depth_intrin);
if (!_d_other_intrinsics) _d_other_intrinsics = make_device_copy(h_other_intrin);
if (!_d_depth_other_extrinsics) _d_depth_other_extrinsics = make_device_copy(h_depth_to_other);
if (!_d_depth_in) _d_depth_in = alloc_dev<uint16_t>(aligned_pixel_count);
cudaMemcpy(_d_depth_in.get(), h_depth_in, depth_size, cudaMemcpyHostToDevice);
if (!_d_other_in) _d_other_in = alloc_dev<unsigned char>(other_size);
cudaMemcpy(_d_other_in.get(), h_other_in, other_size, cudaMemcpyHostToDevice);
if (!_d_aligned_out)
_d_aligned_out = alloc_dev<unsigned char>(aligned_size);
// output starts zeroed; pixels with no mapping remain 0
cudaMemset(_d_aligned_out.get(), 0, aligned_size);
// two int2 entries per depth pixel (rectangle top-left and bottom-right)
if (!_d_pixel_map) _d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2);
// config threads
dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(calc_block_size(h_depth_intrin.width, threads.x), calc_block_size(h_depth_intrin.height, threads.y));
// z = 2: one grid layer per rectangle corner being mapped
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
kernel_map_depth_to_other <<<mapping_blocks,threads>>> (_d_pixel_map.get(), _d_depth_in.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get(),
_d_depth_other_extrinsics.get(), depth_scale);
// static dispatch on pixel size so the copy loop is compile-time sized
switch (other_bytes_per_pixel)
{
case 1: kernel_other_to_depth<1> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
case 2: kernel_other_to_depth<2> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
case 3: kernel_other_to_depth<3> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
case 4: kernel_other_to_depth<4> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break;
}
// block until the kernels finish before the device-to-host copy
cudaDeviceSynchronize();
cudaMemcpy(h_aligned_out, _d_aligned_out.get(), aligned_size, cudaMemcpyDeviceToHost);
}
// Resamples the depth image onto the "other" stream's geometry.
// Device buffers (_d_*) are allocated lazily on the first call and reused on
// later calls, so this helper assumes image dimensions stay constant.
void align_cuda_helper::align_depth_to_other(unsigned char* h_aligned_out, const uint16_t* h_depth_in,
float depth_scale, const rs2_intrinsics& h_depth_intrin, const rs2_extrinsics& h_depth_to_other,
const rs2_intrinsics& h_other_intrin)
{
int depth_pixel_count = h_depth_intrin.width * h_depth_intrin.height;
int other_pixel_count = h_other_intrin.width * h_other_intrin.height;
int aligned_pixel_count = other_pixel_count;
// 16-bit depth, hence * 2 for byte sizes
int depth_byte_size = depth_pixel_count * 2;
int aligned_byte_size = aligned_pixel_count * 2;
// allocate and copy objects to cuda device memory
if (!_d_depth_intrinsics) _d_depth_intrinsics = make_device_copy(h_depth_intrin);
if (!_d_other_intrinsics) _d_other_intrinsics = make_device_copy(h_other_intrin);
if (!_d_depth_other_extrinsics) _d_depth_other_extrinsics = make_device_copy(h_depth_to_other);
if (!_d_depth_in) _d_depth_in = alloc_dev<uint16_t>(depth_pixel_count)
cudaMemcpy(_d_depth_in.get(), h_depth_in, depth_byte_size, cudaMemcpyHostToDevice);
if (!_d_aligned_out) _d_aligned_out = alloc_dev<unsigned char>(aligned_byte_size);
// fill with the 0xffff sentinel: kernel_depth_to_other keeps the minimum
// depth via atomicMin, and kernel_replace_to_zero turns untouched sentinel
// pixels into 0 afterwards
cudaMemset(_d_aligned_out.get(), 0xff, aligned_byte_size);
// two int2 entries per depth pixel (rectangle top-left and bottom-right)
if (!_d_pixel_map) _d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2);
// config threads
dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(calc_block_size(h_depth_intrin.width, threads.x), calc_block_size(h_depth_intrin.height, threads.y));
dim3 other_blocks(calc_block_size(h_other_intrin.width, threads.x), calc_block_size(h_other_intrin.height, threads.y));
// z = 2: one grid layer per rectangle corner being mapped
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
kernel_map_depth_to_other <<<mapping_blocks,threads>>> (_d_pixel_map.get(), _d_depth_in.get(), _d_depth_intrinsics.get(),
_d_other_intrinsics.get(), _d_depth_other_extrinsics.get(), depth_scale);
kernel_depth_to_other <<<depth_blocks,threads>>> ((uint16_t*)_d_aligned_out.get(), _d_depth_in.get(), _d_pixel_map.get(),
_d_depth_intrinsics.get(), _d_other_intrinsics.get());
kernel_replace_to_zero <<<other_blocks, threads>>> ((uint16_t*)_d_aligned_out.get(), _d_other_intrinsics.get());
// block until the kernels finish before the device-to-host copy
cudaDeviceSynchronize();
cudaMemcpy(h_aligned_out, _d_aligned_out.get(), aligned_pixel_count * 2, cudaMemcpyDeviceToHost);
}
#endif //RS2_USE_CUDA
|
e06559e44c2901f3914e187a3090599a098d435b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
testCoalescingMacroGPU;
#include "GlobalBarrier.cuh"
// Generated GPU kernel: adds 10 to every edge entry of A, one thread per edge.
// gm_offsetIntoBlocks shifts the global thread id when work spans multiple
// launches. G0, G1 and NumNodes are unused in this body (kept for the
// generated calling convention).
// NOTE(review): `e` is not declared here — presumably kernelMacro0 expands to
// its declaration; confirm against the macro definition.
__global__ void forEachKernel0 (int *G0, int *G1, int NumNodes, int NumEdges, int * A, int gm_offsetIntoBlocks) {
kernelMacro0;
int tId = blockIdx.x * blockDim.x + threadIdx.x + gm_offsetIntoBlocks;
// skip threads beyond the edge count (grid is rounded up)
if (tId >= NumEdges) {
return;
}
e = tId;
{
A[e] = A[e] + 10;
}
}
| e06559e44c2901f3914e187a3090599a098d435b.cu |
testCoalescingMacroGPU;
#include "GlobalBarrier.cuh"
// Generated GPU kernel: adds 10 to every edge entry of A, one thread per edge.
// gm_offsetIntoBlocks shifts the global thread id when work spans multiple
// launches. G0, G1 and NumNodes are unused in this body (kept for the
// generated calling convention).
// NOTE(review): `e` is not declared here — presumably kernelMacro0 expands to
// its declaration; confirm against the macro definition.
__global__ void forEachKernel0 (int *G0, int *G1, int NumNodes, int NumEdges, int * A, int gm_offsetIntoBlocks) {
kernelMacro0;
int tId = blockIdx.x * blockDim.x + threadIdx.x + gm_offsetIntoBlocks;
// skip threads beyond the edge count (grid is rounded up)
if (tId >= NumEdges) {
return;
}
e = tId;
{
A[e] = A[e] + 10;
}
}
|
27f38759078463c3f599156b0de1508821f0fa5f.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <iostream>
#include <numeric>
#include <array>
#include <vector>
#include <stdlib.h>
#include <random>
#include <thread>
#include <thrust/reduce.h>
#include <thrust/count.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/system/hip/execution_policy.h>
#include "thrust_rmm_allocator.h"
//#define DUMP_TO_FILE
#define MB (1024*1024)
#include "cTimer.h"
cTimer timer;
#include "cClipping.h"
typedef rmm::device_vector<float>::iterator IterFloat;
typedef rmm::device_vector<int>::iterator IterInt;
/**
Clips the input vector given the input-defined plane and copy the output to posOut
@param
posIn: input vector to be clipped
normal: normal of the plane with which posIn will be clipped
d: the point along with the nomral with which posIn will be clipped
posOut: output vector from the clipping
size: size of the posIn vector
threadId: thread id with which clipping will be ran
*/
// Positions are packed as xyzw quadruples; stride-4 views expose each
// component, and a parallel per-float flag vector records clip results.
// NOTE(review): the threadId parameter is unused in this body.
void clip (rmm::device_vector<float> *posIn, float *normal, float d,rmm::device_vector<float> *posOut, size_t size, int threadId)
{
plane_clippingPDB clipPDB (normal, d);
// one flag per float (4 per vertex), -1 = not yet classified
rmm::device_vector<int> clipFlag ( size, -1 ); // num vertices
// component views over the packed xyzw position buffer
strided_range<IterFloat> X ( posIn->begin() , posIn->end(), 4);
strided_range<IterFloat> Y ( posIn->begin()+1, posIn->end(), 4);
strided_range<IterFloat> Z ( posIn->begin()+2, posIn->end(), 4);
strided_range<IterFloat> W ( posIn->begin()+3, posIn->end(), 4);
// matching component views over the flag buffer
strided_range<IterInt> clipX ( clipFlag.begin(), clipFlag.end(), 4);
strided_range<IterInt> clipY ( clipFlag.begin()+1, clipFlag.end(), 4);
strided_range<IterInt> clipZ ( clipFlag.begin()+2, clipFlag.end(), 4);
strided_range<IterInt> clipW ( clipFlag.begin()+3, clipFlag.end(), 4);
// Apply clipPDB of each element of tuples defined using the iterators
thrust::for_each (thrust::make_zip_iterator ( thrust::make_tuple( X.begin(), Y.begin(), Z.begin(), W.begin(),
clipX.begin(), clipY.begin(), clipZ.begin (), clipW.begin() ) ),
thrust::make_zip_iterator ( thrust::make_tuple( X.end(), Y.end(), Z.end(), W.end(),
clipX.end(), clipY.end(), clipZ.end (), clipW.end() ) ),
clipPDB
);
// count surviving vertices by inspecting the x-component flags only
size_t numNotClipped = thrust::count_if(clipX.begin(), clipX.end(), not_clipped<float>());
// Resize the posOut vector to the correct size and copy the information to posOut
posOut->resize(numNotClipped * 4);
// stencil copy: keep every float whose flag says "not clipped"
thrust::copy_if( posIn->begin(), posIn->end(), clipFlag.begin(), posOut->begin(), not_clipped<float>());
}
// Benchmark driver: synthesizes an sx*sy*sz particle set, then repeatedly
// clips it against a random plane on the GPU, reporting per-phase (H2D,
// compute, D2H) and average timings.
// Usage: clipping x_size y_size z_size iterations
int main (int argc, char *argv[])
{
unsigned int i, iter = 30;
size_t sx = 400, sy = 400, sz = 2000;
size_t numParticles = 0;
std::vector<float> pos; // particle positions
rmm::device_vector<float> d_pos; // particle positions in GPU
rmm::device_vector<float> d_posOut; // particle positions out in GPU
double elapsed = 0.0;
double totalElapsed = 0.0;
double transferElapsed = 0.0;
double computeElapsed = 0.0;
// This will be used to generate plane's normals randomly
// between -1 to 1
std::mt19937 rng(time(NULL));
std::uniform_real_distribution<float> gen(-1.0, 1.0);
if (argc < 5)
{
std::cout << "Usage: clipping x_size y_size z_size iterations \n";
return 1;
}
sx = std::stoll (std::string(argv[1]));
sy = std::stoll (std::string(argv[2]));
sz = std::stoll (std::string(argv[3]));
iter = std::stoi (std::string(argv[4]));
numParticles = sx*sy*sz;
std::cout << "Domain size is " << sx << " x " << sy << " x " << sz << " = " << numParticles << " particles" << std::endl;
// 4 floats (xyzw) per particle
std::cout << "Size MB: " << (sizeof(float) * numParticles * 4.0) / MB <<std::endl;
std::cout << "Iterations: " << iter << std::endl;
std::cout << "Generating particles...\n";
// Types of allocations:
// CudaDefaultAllocation
// PoolAllocation
// CudaManagedMemory
rmmOptions_t options{PoolAllocation, 0, true};
rmmInitialize(&options);
// Timer to record time taken to initialize dataset
timer.reset ();
// Synthesize random data of given size
initDataset(&pos, sx, sy, sz);
std::cout << timer.getElapsedMilliseconds() << " ms\n";
std::cout << "done!\n";
timer.reset ();
// plane defined by normal and D
float normal[3], d = 0.0f;
std::cout << "Clipping domain...\n";
for (i=0;i<iter;i++)
{
// Generating plane's normals randomly
// between -1 to 1
normal[0] = gen(rng);
normal[1] = gen(rng);
normal[2] = gen(rng);
// Timer to record H to D transfer time
timer.reset();
d_pos = pos;
elapsed = timer.getElapsedMilliseconds();
std::cout << "H to D: " << elapsed << " ms\n";
transferElapsed += elapsed;
// Compute clipping
timer.reset ();
// size argument is the float count (4 per particle)
clip(&d_pos, normal, d, &d_posOut, d_pos.size(), 0);
elapsed = timer.getElapsedMilliseconds();
std::cout << "Particles_out " << d_posOut.size() << " clipping in " << elapsed << "ms\n";
computeElapsed += elapsed;
timer.reset();
std::vector<float> posOut(d_posOut.size());
thrust::copy(d_posOut.begin(), d_posOut.end(), posOut.begin());
// Clear d_pos, d_posOut vectors
d_pos.clear();
rmm::device_vector<float>().swap(d_pos);
d_posOut.clear();
rmm::device_vector<float>().swap(d_posOut);
// Timer to record D to H transfer time
elapsed = timer.getElapsedMilliseconds();
std::cout << "D to H: " << elapsed << " ms\n";
transferElapsed += elapsed;
#ifdef DUMP_TO_FILE
// NOTE(review): posOutSize is not declared anywhere in this file; this
// branch will not compile if DUMP_TO_FILE is defined — likely should be
// posOut.size(). Confirm against dump()'s expected argument.
dump (posOut, posOutSize*4);
#endif
}
totalElapsed = computeElapsed + transferElapsed;
std::cout << "Total transfers Avg time (ms) after " << iter << " iterations " << transferElapsed / iter << std::endl;
std::cout << "Total compute Avg time (ms) after " << iter << " iterations " << computeElapsed / iter << std::endl;
std::cout << "Total Avg time (ms) after " << iter << " iterations " << totalElapsed / iter << std::endl;
return 0;
}
| 27f38759078463c3f599156b0de1508821f0fa5f.cu |
#include <algorithm>
#include <iostream>
#include <numeric>
#include <array>
#include <vector>
#include <stdlib.h>
#include <random>
#include <thread>
#include <thrust/reduce.h>
#include <thrust/count.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/system/cuda/execution_policy.h>
#include "thrust_rmm_allocator.h"
//#define DUMP_TO_FILE
#define MB (1024*1024)
#include "cTimer.h"
cTimer timer;
#include "cClipping.h"
typedef rmm::device_vector<float>::iterator IterFloat;
typedef rmm::device_vector<int>::iterator IterInt;
/**
Clips the input vector given the input-defined plane and copy the output to posOut
@param
posIn: input vector to be clipped
normal: normal of the plane with which posIn will be clipped
d: the point along with the nomral with which posIn will be clipped
posOut: output vector from the clipping
size: size of the posIn vector
threadId: thread id with which clipping will be ran
*/
// Positions are packed as xyzw quadruples; stride-4 views expose each
// component, and a parallel per-float flag vector records clip results.
// NOTE(review): the threadId parameter is unused in this body.
void clip (rmm::device_vector<float> *posIn, float *normal, float d,rmm::device_vector<float> *posOut, size_t size, int threadId)
{
plane_clippingPDB clipPDB (normal, d);
// one flag per float (4 per vertex), -1 = not yet classified
rmm::device_vector<int> clipFlag ( size, -1 ); // num vertices
// component views over the packed xyzw position buffer
strided_range<IterFloat> X ( posIn->begin() , posIn->end(), 4);
strided_range<IterFloat> Y ( posIn->begin()+1, posIn->end(), 4);
strided_range<IterFloat> Z ( posIn->begin()+2, posIn->end(), 4);
strided_range<IterFloat> W ( posIn->begin()+3, posIn->end(), 4);
// matching component views over the flag buffer
strided_range<IterInt> clipX ( clipFlag.begin(), clipFlag.end(), 4);
strided_range<IterInt> clipY ( clipFlag.begin()+1, clipFlag.end(), 4);
strided_range<IterInt> clipZ ( clipFlag.begin()+2, clipFlag.end(), 4);
strided_range<IterInt> clipW ( clipFlag.begin()+3, clipFlag.end(), 4);
// Apply clipPDB of each element of tuples defined using the iterators
thrust::for_each (thrust::make_zip_iterator ( thrust::make_tuple( X.begin(), Y.begin(), Z.begin(), W.begin(),
clipX.begin(), clipY.begin(), clipZ.begin (), clipW.begin() ) ),
thrust::make_zip_iterator ( thrust::make_tuple( X.end(), Y.end(), Z.end(), W.end(),
clipX.end(), clipY.end(), clipZ.end (), clipW.end() ) ),
clipPDB
);
// count surviving vertices by inspecting the x-component flags only
size_t numNotClipped = thrust::count_if(clipX.begin(), clipX.end(), not_clipped<float>());
// Resize the posOut vector to the correct size and copy the information to posOut
posOut->resize(numNotClipped * 4);
// stencil copy: keep every float whose flag says "not clipped"
thrust::copy_if( posIn->begin(), posIn->end(), clipFlag.begin(), posOut->begin(), not_clipped<float>());
}
// Benchmark driver: synthesizes an sx*sy*sz particle set, then repeatedly
// clips it against a random plane on the GPU, reporting per-phase (H2D,
// compute, D2H) and average timings.
// Usage: clipping x_size y_size z_size iterations
int main (int argc, char *argv[])
{
unsigned int i, iter = 30;
size_t sx = 400, sy = 400, sz = 2000;
size_t numParticles = 0;
std::vector<float> pos; // particle positions
rmm::device_vector<float> d_pos; // particle positions in GPU
rmm::device_vector<float> d_posOut; // particle positions out in GPU
double elapsed = 0.0;
double totalElapsed = 0.0;
double transferElapsed = 0.0;
double computeElapsed = 0.0;
// This will be used to generate plane's normals randomly
// between -1 to 1
std::mt19937 rng(time(NULL));
std::uniform_real_distribution<float> gen(-1.0, 1.0);
if (argc < 5)
{
std::cout << "Usage: clipping x_size y_size z_size iterations \n";
return 1;
}
sx = std::stoll (std::string(argv[1]));
sy = std::stoll (std::string(argv[2]));
sz = std::stoll (std::string(argv[3]));
iter = std::stoi (std::string(argv[4]));
numParticles = sx*sy*sz;
std::cout << "Domain size is " << sx << " x " << sy << " x " << sz << " = " << numParticles << " particles" << std::endl;
// 4 floats (xyzw) per particle
std::cout << "Size MB: " << (sizeof(float) * numParticles * 4.0) / MB <<std::endl;
std::cout << "Iterations: " << iter << std::endl;
std::cout << "Generating particles...\n";
// Types of allocations:
// CudaDefaultAllocation
// PoolAllocation
// CudaManagedMemory
rmmOptions_t options{PoolAllocation, 0, true};
rmmInitialize(&options);
// Timer to record time taken to initialize dataset
timer.reset ();
// Synthesize random data of given size
initDataset(&pos, sx, sy, sz);
std::cout << timer.getElapsedMilliseconds() << " ms\n";
std::cout << "done!\n";
timer.reset ();
// plane defined by normal and D
float normal[3], d = 0.0f;
std::cout << "Clipping domain...\n";
for (i=0;i<iter;i++)
{
// Generating plane's normals randomly
// between -1 to 1
normal[0] = gen(rng);
normal[1] = gen(rng);
normal[2] = gen(rng);
// Timer to record H to D transfer time
timer.reset();
d_pos = pos;
elapsed = timer.getElapsedMilliseconds();
std::cout << "H to D: " << elapsed << " ms\n";
transferElapsed += elapsed;
// Compute clipping
timer.reset ();
// size argument is the float count (4 per particle)
clip(&d_pos, normal, d, &d_posOut, d_pos.size(), 0);
elapsed = timer.getElapsedMilliseconds();
std::cout << "Particles_out " << d_posOut.size() << " clipping in " << elapsed << "ms\n";
computeElapsed += elapsed;
timer.reset();
std::vector<float> posOut(d_posOut.size());
thrust::copy(d_posOut.begin(), d_posOut.end(), posOut.begin());
// Clear d_pos, d_posOut vectors
d_pos.clear();
rmm::device_vector<float>().swap(d_pos);
d_posOut.clear();
rmm::device_vector<float>().swap(d_posOut);
// Timer to record D to H transfer time
elapsed = timer.getElapsedMilliseconds();
std::cout << "D to H: " << elapsed << " ms\n";
transferElapsed += elapsed;
#ifdef DUMP_TO_FILE
// NOTE(review): posOutSize is not declared anywhere in this file; this
// branch will not compile if DUMP_TO_FILE is defined — likely should be
// posOut.size(). Confirm against dump()'s expected argument.
dump (posOut, posOutSize*4);
#endif
}
totalElapsed = computeElapsed + transferElapsed;
std::cout << "Total transfers Avg time (ms) after " << iter << " iterations " << transferElapsed / iter << std::endl;
std::cout << "Total compute Avg time (ms) after " << iter << " iterations " << computeElapsed / iter << std::endl;
std::cout << "Total Avg time (ms) after " << iter << " iterations " << totalElapsed / iter << std::endl;
return 0;
}
|
4d7e68f705a301ae469c1546eaad8db15999dbfd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pbf_constraint.h"
#include "../../interaction/cuda/pbf_contribution.h"
#include "../../kernel/cuda/pbf_kernel.h"
#include "../../util/pbf_cuda_util.h"
#include "../../interaction/cuda/pbf_grid.h"
#include "../../interaction/cuda/pbf_neighbor_search_device_util.cuh"
#include "boundary/pbf_plane_boundary.cuh"
#include "boundary/pbf_sphere_boundary.cuh"
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
using namespace std;
extern __constant__ scalar_t h;
extern __constant__ scalar_t m;
extern __constant__ scalar_t inv_rho0;
extern __constant__ scalar_t inv_k;
namespace {
// used for scaling factor calculation
// Per-thread accumulator for the PBF scaling-factor sums over a particle's
// neighbors: kernel values, kernel gradients, and squared gradient norms.
class scaling_t {
public:
scalar_t ks; // kernel sum
dom_dim kgs; // kernel gradient sum
scalar_t kg2s; // kernel gradient norm sum
// zero-initialized accumulator
__host__ __device__ scaling_t() : ks(0.f), kgs(0.f), kg2s(0.f) {}
// fill-constructor: sets every component to v (used as scaling_t(0.f))
__host__ __device__ scaling_t(scalar_t v) : ks(v), kgs(v), kg2s(v) {}
// component-wise accumulation of one neighbor pair's contribution
__host__ __device__ scaling_t& operator+=(const scaling_t& obj) {
this->ks += obj.ks;
this->kgs += obj.kgs;
this->kg2s += obj.kg2s;
return *this;
}
};
// FMA-chained 3-component dot product: each step is a single fused
// multiply-add, so only one rounding occurs per accumulation.
__host__ __device__ inline scalar_t dot_opt(const dom_dim& a, const dom_dim& b)
{
    return fmaf(a.z, b.z, fmaf(a.y, b.y, a.x * b.x));
}
// Euclidean norm of a 3-vector via an FMA-chained squared sum.
__host__ __device__ inline scalar_t length_opt(const dom_dim& a)
{
    const auto squared_sum = fmaf(a.z, a.z, fmaf(a.y, a.y, a.x * a.x));
    return sqrtf(squared_sum);
}
} // end of unnamed ns
namespace pbf {
namespace cuda {
namespace {
#pragma region scaling_factor
// Contribution of a single neighbor pair to the scaling-factor sums:
// kernel value, kernel gradient, and squared gradient norm for the
// displacement self_pos - pair_pos.
// NOTE(review): direction is computed before the r > 0 guard, so it is
// NaN/inf for coincident particles; it is only consumed inside the guarded
// branch, but hoisting the divide under the guard would be cleaner.
template<typename kernel_t>
__device__ scaling_t calcScalingFactorPair(
const dom_dim& self_pos,
const dom_dim& pair_pos)
{
auto pos_diff = self_pos - pair_pos;
auto r = glm::length(pos_diff);
auto direction = pos_diff / r;
const auto inv_h = 1.f / h;
//auto k = pbf::kernel::cuda::weight<kernel_t>(r, smoothing_length);
auto k = pbf::kernel::cuda::weight<kernel_t>(r, inv_h);
dom_dim kg(0.f);
// gradient is zero (by convention) at r == 0 to avoid the singular direction
if (r > 0.f) {
kg = pbf::kernel::cuda::weight_deriv<kernel_t>(r, inv_h) * direction;
}
auto kg2 = dot_opt(kg, kg);
scaling_t v;
v.ks = k;
v.kgs = kg;
v.kg2s = kg2;
return v;
}
// Computes the PBF scaling factor for each particle from positions directly
// (kernel terms evaluated on the fly). One thread per particle; the neighbor
// list is terminated per particle by the 0xFFFFFFFF sentinel.
// Reads __constant__ m (particle mass) and inv_rho0 (1 / rest density).
template<typename kernel_t>
__global__ void calcScalingFactorCUDA(
scalar_t* scaling_factor,
const dom_dim* position,
const uint32_t* neighbor_list,
scalar_t relaxation,
uint32_t max_pair_particle_num,
int num_particle
)
{
const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
const auto self_pos = position[index];
// Contribution Calculation
scaling_t sum_scaling_factor(0.f);
uint32_t pair_cnt = 0;
while (true) {
uint32_t pair_index = getNeighborParticleAddr(neighbor_list, index, pair_cnt, max_pair_particle_num);
if (pair_index != 0xFFFFFFFF) {
const auto pair_pos = position[pair_index];
// NOTE(review): this local shadows the scaling_factor output parameter
scaling_t scaling_factor = calcScalingFactorPair<kernel_t>(self_pos, pair_pos);
sum_scaling_factor += scaling_factor;
pair_cnt++;
}
else
break;
}
// density constraint C_i = (m/rho0) * sum_j W_ij - 1
auto constraint = sum_scaling_factor.ks * m * inv_rho0 - 1.f;
// sum of |grad_j C_i|^2 over all j, plus the self-gradient term
auto kg2s = sum_scaling_factor.kg2s + glm::dot(sum_scaling_factor.kgs, sum_scaling_factor.kgs);
// NOTE(review): pow(inv_rho0, 2) is the double-precision overload; the
// precomputed-kernel variant below uses inv_rho0 * inv_rho0 instead
auto s = constraint / (m * m * pow(inv_rho0, 2) * kg2s + relaxation);
scaling_factor[index] = s;
}
} // end of unnamed ns
// Host wrapper: launches the position-based scaling-factor kernel with
// 128-thread blocks. inv_stable_density, particle_mass and smoothing_length
// are unused in this body — the kernel reads the corresponding __constant__
// symbols (m, inv_rho0, h) instead.
void calcScalingFactor(
scalar_t* scaling_factor,
const dom_dim* position,
std::shared_ptr<neighbor_search>& ns,
scalar_t inv_stable_density, scalar_t particle_mass, scalar_t smoothing_length,
scalar_t relaxation,
int num_particle
)
{
typedef kernel::cuda::PBFKERNEL kernel_t;
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
using namespace std;
auto neighbor_list = ns->getNeighborList();
const auto max_pair_particle_num = ns->getMaxPairParticleNum();
// skip the launch entirely when there are no particles
if (num_block > 0)
hipLaunchKernelGGL(( calcScalingFactorCUDA<kernel_t>), dim3(num_block), dim3(num_thread) , 0, 0,
scaling_factor, position, neighbor_list, relaxation, max_pair_particle_num, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
namespace {
// Squared norm of a precomputed kernel gradient, accumulated with fmaf so
// each step incurs a single rounding. The kernel_t parameter is unused in
// this overload; it keeps the pair helpers' template interface uniform.
template<typename kernel_t>
__device__ scalar_t calcScalingFactorPair(
const dom_dim& grad_kernel)
{
    const auto xx = grad_kernel.x * grad_kernel.x;
    const auto xx_yy = fmaf(grad_kernel.y, grad_kernel.y, xx);
    return fmaf(grad_kernel.z, grad_kernel.z, xx_yy);
}
// Computes the PBF scaling factor for each particle from precomputed
// per-pair kernel values and gradients (indexed the same way as the
// neighbor list). One thread per particle; the neighbor list is terminated
// per particle by the 0xFFFFFFFF sentinel.
// Reads __constant__ m (particle mass) and inv_rho0 (1 / rest density).
template<typename kernel_t>
__global__ void calcScalingFactorCUDA(
scalar_t* scaling_factor,
const scalar_t* __restrict__ kernels,
const dom_dim* __restrict__ grad_kernels,
const uint32_t* __restrict__ neighbor_list,
scalar_t relaxation,
uint32_t max_pair_particle_num,
int num_particle
)
{
const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
// Contribution Calculation
scaling_t sum_scaling_factor(0.f);
uint32_t pair_cnt = 0;
while (true) {
uint32_t neigbor_list_index = getNeighborListIndex(index, pair_cnt, max_pair_particle_num);
uint32_t pair_index = neighbor_list[neigbor_list_index];
if (pair_index != 0xFFFFFFFF) {
const auto grad_kernel = grad_kernels[neigbor_list_index];
const auto kernel = kernels[neigbor_list_index];
scalar_t kg2 = calcScalingFactorPair<kernel_t>(grad_kernel);
sum_scaling_factor.ks += kernel;
sum_scaling_factor.kgs += grad_kernel;
sum_scaling_factor.kg2s += kg2;
pair_cnt++;
}
else
break;
}
// density constraint C_i = (m/rho0) * sum_j W_ij - 1
auto constraint = fmaf(sum_scaling_factor.ks * m, inv_rho0, - 1.f);
// sum of |grad_j C_i|^2 over all j, plus the self-gradient term |sum_j grad W_ij|^2
auto kg2s = fmaf(sum_scaling_factor.kgs.x, sum_scaling_factor.kgs.x, fmaf(sum_scaling_factor.kgs.z, sum_scaling_factor.kgs.z,
fmaf(sum_scaling_factor.kgs.y, sum_scaling_factor.kgs.y, sum_scaling_factor.kg2s)));
auto m2_rho2 = m * m * inv_rho0 * inv_rho0;
auto s = constraint / (fmaf(m2_rho2, kg2s, relaxation));
scaling_factor[index] = s;
}
} // end of unnamed ns
// Host wrapper: launches the precomputed-kernel scaling-factor variant with
// 192-thread blocks. inv_stable_density, particle_mass and smoothing_length
// are unused in this body — the kernel reads the corresponding __constant__
// symbols (m, inv_rho0) instead.
void calcScalingFactor(
scalar_t* scaling_factor,
const scalar_t* kernels,
const dom_dim* grad_kernels,
std::shared_ptr<neighbor_search>& ns,
scalar_t inv_stable_density,
scalar_t particle_mass,
scalar_t smoothing_length,
scalar_t relaxation,
int num_particle
)
{
typedef kernel::cuda::PBFKERNEL kernel_t;
uint32_t num_thread, num_block;
computeGridSize(num_particle, 192, num_block, num_thread);
using namespace std;
auto neighbor_list = ns->getNeighborList();
const auto max_pair_particle_num = ns->getMaxPairParticleNum();
// skip the launch entirely when there are no particles
if (num_block > 0)
hipLaunchKernelGGL(( calcScalingFactorCUDA<kernel_t>), dim3(num_block), dim3(num_thread) , 0, 0,
scaling_factor, kernels, grad_kernels, neighbor_list, relaxation, max_pair_particle_num, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
#pragma endregion
namespace {
// intersection of p1-p2 line and plane, no exception handling
__host__ __device__ dom_dim intersection(dom_dim p1, dom_dim p2, const glm::vec4& plane) {
// unit direction along the p1 -> p2 segment
auto e = (p2 - p1) / glm::length(p2 - p1);
// plane is (a, b, c, d) for ax + by + cz + d = 0
auto abc = glm::vec3(plane.x, plane.y, plane.z);
// signed distance along e from p1 to the plane
auto k = -(glm::dot(abc, p1) + plane.w) / glm::dot(abc, e);
//auto qc = p1 + k * e;
// p1 + k * e, written with fmaf for single-rounding accumulation
dom_dim qc;
qc.x = fmaf(k, e.x, p1.x);
qc.y = fmaf(k, e.y, p1.y);
qc.z = fmaf(k, e.z, p1.z);
return qc;
}
} // end of unnamed ns
// Clamps each particle's pending position update (delta_p) against the scene
// boundaries. Iterates until a full pass produces no new collision, since
// resolving one boundary can push the particle toward another.
// One thread per particle.
__global__ void responseCollisionCUDA(
dom_dim* position_update,
const dom_dim* predicted_position,
const dom_dim* old_position,
int num_particle
)
{
    const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_particle) return;
    auto delta_p = position_update[index];
    auto predicted_x = predicted_position[index];
    // Bug fix: this previously read predicted_position[index], leaving the
    // old_position parameter entirely unused; the boundary response needs
    // the pre-step position as its second reference point.
    auto old_x = old_position[index];
    while (true) {
        auto p = predicted_x + delta_p;
        dom_dim sim_origin(2.2f);
        dom_dim sim_end(6.5f);
        bool collision_check = false;
#if 0
        // axis-aligned box walls (currently disabled)
        // bottom
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_origin, dom_dim(0.f, 1.f, 0.f));
        // top
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_end, dom_dim(0.f, -1.f, 0.f));
        // left wall
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_origin, dom_dim(1.f, 0.f, 0.f));
        // right wall
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_end, dom_dim(-1.f, 0.f, 0.f));
        // front
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_origin, dom_dim(0.f, 0.f, 1.f));
        // back
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_end, dom_dim(0.f, 0.f, -1.f));
#endif
#if 1
        // keep particles inside a sphere of radius 3 centered at (3, 3, 3)
        responseInnerSphereBoundary(collision_check, delta_p, old_x, p, dom_dim(3.f, 3.f, 3.f), 3.f);
        p = predicted_x + delta_p;
        // sphere obstacles
        //responseOuterSphereBoundary(collision_check, delta_p, old_x, p, dom_dim(3.f, 0.f, 3.f), 2.f);
#endif
        if (!collision_check) {
            break;
        }
    }
    position_update[index] = delta_p;
}
// Host wrapper: launches responseCollisionCUDA with 128-thread blocks.
// In _DEBUG builds, synchronizes and checks for launch/execution errors.
void responseCollision(
dom_dim* position_update,
const dom_dim* predicted_position,
const dom_dim* old_position,
int num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
// skip the launch entirely when there are no particles
if (num_block > 0)
responseCollisionCUDA << < num_block, num_thread >> >(position_update, predicted_position, old_position, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
// Applies the accumulated corrections: position[i] += position_update[i].
// One thread per particle; threads beyond num_particle do nothing.
__global__ void updateInterimPositionCUDA(
dom_dim* position,
const dom_dim* position_update,
int num_particle
)
{
    const uint32_t i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num_particle) {
        position[i] += position_update[i];
    }
}
// Host wrapper: launches updateInterimPositionCUDA with 128-thread blocks.
// In _DEBUG builds, synchronizes and checks for launch/execution errors.
void updateInterimPosition(
dom_dim* position,
const dom_dim* position_update,
int num_particle)
{
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
// skip the launch entirely when there are no particles
if (num_block > 0)
updateInterimPositionCUDA<< < num_block, num_thread >> >(position, position_update, num_particle);
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
} // end of cuda ns
} // end of pbf ns
| 4d7e68f705a301ae469c1546eaad8db15999dbfd.cu | #include "pbf_constraint.h"
#include "../../interaction/cuda/pbf_contribution.h"
#include "../../kernel/cuda/pbf_kernel.h"
#include "../../util/pbf_cuda_util.h"
#include "../../interaction/cuda/pbf_grid.h"
#include "../../interaction/cuda/pbf_neighbor_search_device_util.cuh"
#include "boundary/pbf_plane_boundary.cuh"
#include "boundary/pbf_sphere_boundary.cuh"
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
using namespace std;
extern __constant__ scalar_t h;
extern __constant__ scalar_t m;
extern __constant__ scalar_t inv_rho0;
extern __constant__ scalar_t inv_k;
namespace {
// used for scaling factor calculation
// Per-thread accumulator for the PBF scaling-factor sums over a particle's
// neighbors: kernel values, kernel gradients, and squared gradient norms.
class scaling_t {
public:
scalar_t ks; // kernel sum
dom_dim kgs; // kernel gradient sum
scalar_t kg2s; // kernel gradient norm sum
// zero-initialized accumulator
__host__ __device__ scaling_t() : ks(0.f), kgs(0.f), kg2s(0.f) {}
// fill-constructor: sets every component to v (used as scaling_t(0.f))
__host__ __device__ scaling_t(scalar_t v) : ks(v), kgs(v), kg2s(v) {}
// component-wise accumulation of one neighbor pair's contribution
__host__ __device__ scaling_t& operator+=(const scaling_t& obj) {
this->ks += obj.ks;
this->kgs += obj.kgs;
this->kg2s += obj.kg2s;
return *this;
}
};
// FMA-chained 3-component dot product: each step is a single fused
// multiply-add, so only one rounding occurs per accumulation.
__host__ __device__ inline scalar_t dot_opt(const dom_dim& a, const dom_dim& b)
{
    return fmaf(a.z, b.z, fmaf(a.y, b.y, a.x * b.x));
}
// Euclidean norm of a 3-vector via an FMA-chained squared sum.
__host__ __device__ inline scalar_t length_opt(const dom_dim& a)
{
    const auto squared_sum = fmaf(a.z, a.z, fmaf(a.y, a.y, a.x * a.x));
    return sqrtf(squared_sum);
}
} // end of unnamed ns
namespace pbf {
namespace cuda {
namespace {
#pragma region scaling_factor
// Contribution of a single neighbor pair to the scaling-factor sums:
// kernel value, kernel gradient, and squared gradient norm for the
// displacement self_pos - pair_pos.
// NOTE(review): direction is computed before the r > 0 guard, so it is
// NaN/inf for coincident particles; it is only consumed inside the guarded
// branch, but hoisting the divide under the guard would be cleaner.
template<typename kernel_t>
__device__ scaling_t calcScalingFactorPair(
const dom_dim& self_pos,
const dom_dim& pair_pos)
{
auto pos_diff = self_pos - pair_pos;
auto r = glm::length(pos_diff);
auto direction = pos_diff / r;
const auto inv_h = 1.f / h;
//auto k = pbf::kernel::cuda::weight<kernel_t>(r, smoothing_length);
auto k = pbf::kernel::cuda::weight<kernel_t>(r, inv_h);
dom_dim kg(0.f);
// gradient is zero (by convention) at r == 0 to avoid the singular direction
if (r > 0.f) {
kg = pbf::kernel::cuda::weight_deriv<kernel_t>(r, inv_h) * direction;
}
auto kg2 = dot_opt(kg, kg);
scaling_t v;
v.ks = k;
v.kgs = kg;
v.kg2s = kg2;
return v;
}
// Computes the PBF scaling factor for each particle from positions directly
// (kernel terms evaluated on the fly). One thread per particle; the neighbor
// list is terminated per particle by the 0xFFFFFFFF sentinel.
// Reads __constant__ m (particle mass) and inv_rho0 (1 / rest density).
template<typename kernel_t>
__global__ void calcScalingFactorCUDA(
scalar_t* scaling_factor,
const dom_dim* position,
const uint32_t* neighbor_list,
scalar_t relaxation,
uint32_t max_pair_particle_num,
int num_particle
)
{
const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
const auto self_pos = position[index];
// Contribution Calculation
scaling_t sum_scaling_factor(0.f);
uint32_t pair_cnt = 0;
while (true) {
uint32_t pair_index = getNeighborParticleAddr(neighbor_list, index, pair_cnt, max_pair_particle_num);
if (pair_index != 0xFFFFFFFF) {
const auto pair_pos = position[pair_index];
// NOTE(review): this local shadows the scaling_factor output parameter
scaling_t scaling_factor = calcScalingFactorPair<kernel_t>(self_pos, pair_pos);
sum_scaling_factor += scaling_factor;
pair_cnt++;
}
else
break;
}
// density constraint C_i = (m/rho0) * sum_j W_ij - 1
auto constraint = sum_scaling_factor.ks * m * inv_rho0 - 1.f;
// sum of |grad_j C_i|^2 over all j, plus the self-gradient term
auto kg2s = sum_scaling_factor.kg2s + glm::dot(sum_scaling_factor.kgs, sum_scaling_factor.kgs);
// NOTE(review): pow(inv_rho0, 2) is the double-precision overload; the
// precomputed-kernel variant below uses inv_rho0 * inv_rho0 instead
auto s = constraint / (m * m * pow(inv_rho0, 2) * kg2s + relaxation);
scaling_factor[index] = s;
}
} // end of unnamed ns
// Host wrapper: launches the position-based scaling-factor kernel with
// 128-thread blocks. inv_stable_density, particle_mass and smoothing_length
// are unused in this body — the kernel reads the corresponding __constant__
// symbols (m, inv_rho0, h) instead.
void calcScalingFactor(
scalar_t* scaling_factor,
const dom_dim* position,
std::shared_ptr<neighbor_search>& ns,
scalar_t inv_stable_density, scalar_t particle_mass, scalar_t smoothing_length,
scalar_t relaxation,
int num_particle
)
{
typedef kernel::cuda::PBFKERNEL kernel_t;
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
using namespace std;
auto neighbor_list = ns->getNeighborList();
const auto max_pair_particle_num = ns->getMaxPairParticleNum();
// skip the launch entirely when there are no particles
if (num_block > 0)
calcScalingFactorCUDA<kernel_t><<< num_block, num_thread >>>
(scaling_factor, position, neighbor_list, relaxation, max_pair_particle_num, num_particle);
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
namespace {
// Squared norm of a precomputed kernel gradient, accumulated with fmaf so
// each step incurs a single rounding. The kernel_t parameter is unused in
// this overload; it keeps the pair helpers' template interface uniform.
template<typename kernel_t>
__device__ scalar_t calcScalingFactorPair(
const dom_dim& grad_kernel)
{
    const auto xx = grad_kernel.x * grad_kernel.x;
    const auto xx_yy = fmaf(grad_kernel.y, grad_kernel.y, xx);
    return fmaf(grad_kernel.z, grad_kernel.z, xx_yy);
}
// Computes the PBF scaling factor for each particle from precomputed
// per-pair kernel values and gradients (indexed the same way as the
// neighbor list). One thread per particle; the neighbor list is terminated
// per particle by the 0xFFFFFFFF sentinel.
// Reads __constant__ m (particle mass) and inv_rho0 (1 / rest density).
template<typename kernel_t>
__global__ void calcScalingFactorCUDA(
scalar_t* scaling_factor,
const scalar_t* __restrict__ kernels,
const dom_dim* __restrict__ grad_kernels,
const uint32_t* __restrict__ neighbor_list,
scalar_t relaxation,
uint32_t max_pair_particle_num,
int num_particle
)
{
const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_particle) return;
// Contribution Calculation
scaling_t sum_scaling_factor(0.f);
uint32_t pair_cnt = 0;
while (true) {
uint32_t neigbor_list_index = getNeighborListIndex(index, pair_cnt, max_pair_particle_num);
uint32_t pair_index = neighbor_list[neigbor_list_index];
if (pair_index != 0xFFFFFFFF) {
const auto grad_kernel = grad_kernels[neigbor_list_index];
const auto kernel = kernels[neigbor_list_index];
scalar_t kg2 = calcScalingFactorPair<kernel_t>(grad_kernel);
sum_scaling_factor.ks += kernel;
sum_scaling_factor.kgs += grad_kernel;
sum_scaling_factor.kg2s += kg2;
pair_cnt++;
}
else
break;
}
// density constraint C_i = (m/rho0) * sum_j W_ij - 1
auto constraint = fmaf(sum_scaling_factor.ks * m, inv_rho0, - 1.f);
// sum of |grad_j C_i|^2 over all j, plus the self-gradient term |sum_j grad W_ij|^2
auto kg2s = fmaf(sum_scaling_factor.kgs.x, sum_scaling_factor.kgs.x, fmaf(sum_scaling_factor.kgs.z, sum_scaling_factor.kgs.z,
fmaf(sum_scaling_factor.kgs.y, sum_scaling_factor.kgs.y, sum_scaling_factor.kg2s)));
auto m2_rho2 = m * m * inv_rho0 * inv_rho0;
auto s = constraint / (fmaf(m2_rho2, kg2s, relaxation));
scaling_factor[index] = s;
}
} // end of unnamed ns
// Host-side launcher for calcScalingFactorCUDA: computes one scaling factor
// per particle from precomputed kernel values/gradients and the neighbor list.
// NOTE(review): inv_stable_density, particle_mass and smoothing_length are
// accepted but not forwarded — the kernel presumably reads matching device
// constants; confirm against the rest of the file.
void calcScalingFactor(
scalar_t* scaling_factor,
const scalar_t* kernels,
const dom_dim* grad_kernels,
std::shared_ptr<neighbor_search>& ns,
scalar_t inv_stable_density,
scalar_t particle_mass,
scalar_t smoothing_length,
scalar_t relaxation,
int num_particle
)
{
    using kernel_t = kernel::cuda::PBFKERNEL;
    uint32_t thread_cnt = 0;
    uint32_t block_cnt = 0;
    computeGridSize(num_particle, 192, block_cnt, thread_cnt);
    const auto pair_list = ns->getNeighborList();
    const auto max_pairs = ns->getMaxPairParticleNum();
    if (block_cnt > 0) {
        calcScalingFactorCUDA<kernel_t><<< block_cnt, thread_cnt >>>(
            scaling_factor, kernels, grad_kernels, pair_list, relaxation, max_pairs, num_particle);
    }
#ifdef _DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
}
#pragma endregion
namespace {
// Returns the point where the line through p1 and p2 crosses the plane
// a*x + b*y + c*z + d = 0, with plane = {a, b, c, d}. The degenerate case
// of a line parallel to the plane is not handled (division by zero).
__host__ __device__ dom_dim intersection(dom_dim p1, dom_dim p2, const glm::vec4& plane) {
    const auto dir = (p2 - p1) / glm::length(p2 - p1);          // unit direction of the line
    const auto normal = glm::vec3(plane.x, plane.y, plane.z);   // plane normal (a, b, c)
    // Signed distance along dir from p1 to the plane.
    const auto t = -(glm::dot(normal, p1) + plane.w) / glm::dot(normal, dir);
    dom_dim hit;
    hit.x = fmaf(t, dir.x, p1.x);
    hit.y = fmaf(t, dir.y, p1.y);
    hit.z = fmaf(t, dir.z, p1.z);
    return hit;
}
} // end of unnamed ns
// Resolves boundary collisions for the accumulated position update delta_p
// of each particle: repeatedly applies the enabled boundary responses until
// a pass makes no further correction, then writes the corrected update back.
// One thread per particle.
__global__ void responseCollisionCUDA(
dom_dim* position_update,
const dom_dim* predicted_position,
const dom_dim* old_position,
int num_particle
)
{
    const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_particle) return;
    auto delta_p = position_update[index];
    auto predicted_x = predicted_position[index];
    // Bug fix: the pre-update position must come from old_position; the
    // original read predicted_position here, leaving old_position unused.
    auto old_x = old_position[index];
    //if (index == 0)
    // printf("%f, %f, %f\n", delta_p.x, delta_p.y, delta_p.z);
    while (true) {
        auto p = predicted_x + delta_p;
        dom_dim sim_origin(2.2f);
        dom_dim sim_end(6.5f);
        bool collision_check = false;
#if 0
        // bottom
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_origin, dom_dim(0.f, 1.f, 0.f));
        // top
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_end, dom_dim(0.f, -1.f, 0.f));
        // left wall
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_origin, dom_dim(1.f, 0.f, 0.f));
        // right wall
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_end, dom_dim(-1.f, 0.f, 0.f));
        // front
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_origin, dom_dim(0.f, 0.f, 1.f));
        // back
        responsePlaneBoundary(collision_check, delta_p, old_x, p, sim_end, dom_dim(0.f, 0.f, -1.f));
#endif
#if 1
        // Keep particles inside a sphere of radius 3 centered at (3,3,3).
        responseInnerSphereBoundary(collision_check, delta_p, old_x, p, dom_dim(3.f, 3.f, 3.f), 3.f);
        p = predicted_x + delta_p;
        // sphere obstacles
        //responseOuterSphereBoundary(collision_check, delta_p, old_x, p, dom_dim(3.f, 0.f, 3.f), 2.f);
#endif
        if (!collision_check) {
            break;
        }
    }
    position_update[index] = delta_p;
    //if (index == 0)
    // printf("%f, %f, %f\n", delta_p.x, delta_p.y, delta_p.z);
}
// Host-side launcher for responseCollisionCUDA (128 threads per block).
void responseCollision(
dom_dim* position_update,
const dom_dim* predicted_position,
const dom_dim* old_position,
int num_particle)
{
    uint32_t threads = 0;
    uint32_t blocks = 0;
    computeGridSize(num_particle, 128, blocks, threads);
    if (blocks > 0) {
        responseCollisionCUDA<<<blocks, threads>>>(position_update, predicted_position, old_position, num_particle);
    }
#ifdef _DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
}
// Applies the per-particle position correction in place:
// position[i] += position_update[i]. One thread per particle.
__global__ void updateInterimPositionCUDA(
dom_dim* position,
const dom_dim* position_update,
int num_particle
)
{
    const uint32_t i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num_particle) {
        position[i] += position_update[i];
    }
}
// Host-side launcher for updateInterimPositionCUDA (128 threads per block).
void updateInterimPosition(
dom_dim* position,
const dom_dim* position_update,
int num_particle)
{
    uint32_t threads = 0;
    uint32_t blocks = 0;
    computeGridSize(num_particle, 128, blocks, threads);
    if (blocks > 0) {
        updateInterimPositionCUDA<<<blocks, threads>>>(position, position_update, num_particle);
    }
#ifdef _DEBUG
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
#endif
}
} // end of cuda ns
} // end of pbf ns
|
bd5a562b77ff4355d263f746d753eaf4c26cf2f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "../include/cuda_mpi_double.hpp"
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include <iostream>
#include "../include/cuda_matmult_double.hpp"
#include "../include/cuda_svd.hpp"
#include "../include/cuda_svd_double.hpp"
#include "../include/cuda_get_information.hpp"
#include "../include/cuda_helper.hpp"
#define THREADS_PER_BLOCK 512
constexpr size_t size_d = sizeof(double);
// Builds the pseudo-inverse of the diagonal singular-value matrix: writes
// 1/sigma_col on the diagonal entries of d_S_inv and zero everywhere else.
// d_S_inv is treated as column-major with leading dimension num_of_sigma and
// num_of_el total elements. Singular values at or below `cutoff` are treated
// as zero (their inverse entry is set to 0) to regularize the result.
// One thread per output element; extra threads do nothing.
__global__ void cuda_invert_singular_values_device_memory( double *d_S, double *d_S_inv, const double cutoff, const int num_of_sigma, const int num_of_el )
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if ( index < num_of_el )
{
// Column of this element (leading dimension num_of_sigma).
int sigma_index = ( index / num_of_sigma );
if ( sigma_index >= num_of_sigma ) d_S_inv[index] = .0;
else
{
// index = col*num_of_sigma + row is on the diagonal (row == col)
// exactly when (index - col) is a multiple of num_of_sigma.
bool is_sigma = (( index - sigma_index ) % num_of_sigma ) == 0;
if ( is_sigma && d_S[sigma_index] > cutoff ) d_S_inv[index] = ( 1.0 / d_S[sigma_index] );
else d_S_inv[index] = .0;
}
}
}
// Debug helper: copies an m x n device matrix (column-major, leading
// dimension lda >= m) to the host and prints it row by row.
void printDeviceMatrix ( double* M, int m, int n, int lda )
{
// The device buffer holds lda*n elements, and that is how much we copy;
// the original allocated only m*n doubles, overflowing the host buffer
// whenever lda > m.
double *tmp = (double*) malloc( (size_t)lda*n*sizeof(double));
if ( tmp == NULL ) return; // nothing to print if the allocation failed
hipMemcpy(tmp, M, (size_t)lda*n*sizeof(double),hipMemcpyDeviceToHost);
for ( int r = 0; r < m; ++r )
{
for ( int c = 0; c < n; ++c ) std::cout << tmp[r+c*lda] << " ";
std::cout << std::endl;
}
free(tmp);
}
// Decides which allocation strategy fits into free GPU memory for an
// lda x n problem with elements of `type_size` bytes.
// Returns 0 if all matrices fit on the device simultaneously, 1 if they fit
// only when S_inv and U share memory (U buffered in host RAM), -1 if even
// that does not fit, or an ERROR_* code on a cusolver failure.
int check_needed_memory ( int lda, int m, int n, size_t type_size )
{
// Calculate the needed space on dRAM if all matrizes are kept on it,
// but are deleted as soon as they are not needed anymore.
long unsigned int requested_matrizes = type_size * n * ( m + 3*n );
// Calculate the needed space on dRAM if S_inv and U share the memory
// since they are the largest matrizes. U will be buffered on main RAM.
long unsigned int reducable_size = type_size * n * ( m + n +1 );
// Additional worksize of cusolverDNDgesvd
cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS;
hipsolverDnHandle_t cusolverH = NULL;
int work = 0;
long unsigned int free_i;
size_t free_s, total;
// Create cusolver handle
status = hipsolverDnCreate(&cusolverH);
if (CUSOLVER_STATUS_SUCCESS != status ) return ERROR_CUSOLVER_DN_CREATE;
// Querry buffer
status = hipsolverDnDgesvd_bufferSize( cusolverH, m, n, &work );
if ( CUSOLVER_STATUS_SUCCESS != status )
{
// Destroy the handle before bailing out (the original leaked it here).
hipsolverDnDestroy(cusolverH);
return ERROR_CUDA_BUFFER_SIZE;
}
// Run cudaMemGetInfor here since cuslverDnCreate allocates
// space for the cusolverH
hipMemGetInfo(&free_s,&total);
if (cusolverH) hipsolverDnDestroy(cusolverH);
// bufferSize reports elements; convert to bytes.
work *= type_size;
requested_matrizes += ((long unsigned int) work);
reducable_size += ((long unsigned int) work);
free_i = (long unsigned int) free_s;
// Keep a safety margin of CUDA_SAVE_MEMORY_BUFFER_ bytes in both cases.
if ( free_i > requested_matrizes && (free_i - requested_matrizes) > CUDA_SAVE_MEMORY_BUFFER_ ) return 0;
else if ( free_i > reducable_size && (free_i - reducable_size) > CUDA_SAVE_MEMORY_BUFFER_ ) return 1;
return -1;
}
int cuda_double_moore_penrose_inverse_host_memory( double *A, double **A_inv, int m, int n, int lda, double cutoff )
{
hipError_t cudaStat = hipSuccess;
int return_value = 0;
const long unsigned int A_size = get_size_(lda, n , size_d);
const long unsigned int A_i_size = get_size_( m, n , size_d);
double *d_A; // Initial matrix A
double *d_A_inv; // Initial matrix A
cudaStat = hipMalloc ((void**)&d_A, A_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
cudaStat = hipMalloc ((void**)&d_A_inv, A_i_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
return_value = cuda_double_moore_penrose_inverse_device_memory( d_A, d_A_inv, m, n, lda, cutoff );
A_inv[0] = (double*) malloc(A_size);
cudaStat = hipMemcpy(A_inv[0], d_A_inv, A_size, hipMemcpyDeviceToHost);
cleanup:
if (d_A) hipFree(d_A);
if (d_A_inv) hipFree(d_A_inv);
hipDeviceReset();
return return_value;
}
// Computes the Moore-Penrose pseudo-inverse of the device matrix d_A
// (m x n, leading dimension lda) into d_A_inv via SVD:
//   A^+ = (V * diag(1/sigma_i)) * U^T,
// where singular values <= cutoff are treated as zero. Depending on the
// verdict of check_needed_memory(), U may be staged through host RAM so
// that U and S_inv never occupy device memory at the same time.
// Returns 0 on success or an ERROR_* code; all temporaries are released
// through the single goto-cleanup path.
int cuda_double_moore_penrose_inverse_device_memory( double *d_A, double *d_A_inv, int m, int n, int lda, double cutoff )
{
hipError_t cudaStat = hipSuccess;
// Start with checking the expected size of the problem.
// memory_information: 0 = everything fits, 1 = stage U on host, -1 = too big.
int memory_information = check_needed_memory (lda, m, n, sizeof(double));
if ( memory_information == -1 )
{
std::cerr << "ERROR:\tNot enough GPU memory...\n";
return ERROR_CUDA_MALLOC;
}
// Some values for the following calculations
int return_value = 0;
const int S_elements = min(m,n)*n;
const long unsigned int S_size = get_size_( 1, min(m,n), size_d);
const long unsigned int S_i_size = get_size_( n, min(m,n), size_d);
const long unsigned int U_size = get_size_( m, min(m,n), size_d);
const long unsigned int V_size = get_size_( n, min(m,n), size_d);
const long unsigned int A_size = get_size_(lda, n , size_d);
// Device pointers
// double *d_A; // Initial matrix A
double *d_S; // Vector of singular values
double *d_S_inv; // diag[ 1 / sigma_i ]
double *d_U; // SVD matrix U
double *d_V; // SVD matrix V
double *d_VS; // V * diag[1/sigma_i]
double *U; // host staging buffer for U (memory_information == 1 only)
d_S = d_S_inv = d_U = d_V = d_VS = U = NULL;
// Allocate svd memory on GPU
cudaStat = hipMalloc ((void**)&d_U, U_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// cudaStat = hipMalloc ((void**)&d_A, A_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
cudaStat = hipMalloc ((void**)&d_V, V_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
cudaStat = hipMalloc ((void**)&d_S, S_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
if ( !memory_information )
{
// Plenty of memory: allocate S_inv up front alongside U.
cudaStat = hipMalloc ((void**)&d_S_inv, S_i_size);
if (hipSuccess != cudaStat)
{
return_value = ERROR_CUDA_MALLOC;
goto cleanup;
}
}
// Copy initial matrix on GPU
// cudaStat = hipMemcpy(d_A, A, A_size, hipMemcpyHostToDevice); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_COPY; goto cleanup; }
// Compute the svd
return_value = cuda_double_svd_device_memory( d_A, d_S, d_U, d_V, m, n, lda );
if ( return_value )
{
std::cout << "svd_error = " << return_value << std::endl;
goto cleanup;
}
if ( memory_information )
{
// Tight memory: park U in host RAM, free its device copy, and reuse
// the freed space for S_inv.
U = (double*) malloc (U_size);
cudaStat = hipMemcpy(U, d_U, U_size, hipMemcpyDeviceToHost); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_COPY; goto cleanup; }
// if (d_A) { hipFree(d_A); d_A = NULL; }
if (d_U) { hipFree(d_U); d_U = NULL; }
cudaStat = hipMalloc ((void**)&d_S_inv, S_i_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
}
// Invert the singular values and write them on the diagonal of S_inv
if ( n < THREADS_PER_BLOCK )
hipLaunchKernelGGL(( cuda_invert_singular_values_device_memory), dim3(m),dim3(n), 0, 0, d_S, d_S_inv, cutoff, n, S_elements );
else
{
int Number_of_blocks = (S_elements/THREADS_PER_BLOCK+1); // +1 -> else last sigma might be skiped!
hipLaunchKernelGGL(( cuda_invert_singular_values_device_memory), dim3(Number_of_blocks),dim3(THREADS_PER_BLOCK), 0, 0, d_S, d_S_inv, cutoff, n, S_elements );
}
// Free the vector S
if (d_S) { hipFree(d_S); d_S = NULL; }
// Allocate the memory for the product V*S_inv
cudaStat = hipMalloc ((void**)&d_VS, V_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// Compute the product
// If "small svd is used we have to choose 't' for matrix d_V!
cuda_dgemm('t', 'n', n, n, n, 1.0, d_V, n, d_S_inv, n, 0.0, d_VS, n );
// Free V since it is not needed anymore
if (d_V) { hipFree(d_V); d_V = NULL; }
if (d_S_inv) { hipFree(d_S_inv); d_S_inv = NULL; }
// If U was shifted to main RAM write the respective values back
// in initial dRAM and remove S_inv
if ( memory_information)
{
cudaStat = hipMalloc ((void**)&d_U, U_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// cudaStat = hipMalloc ((void**)&d_A, U_size); if (hipSuccess != cudaStat) return ERROR_CUDA_MALLOC;
cudaStat = hipMemcpy(d_U, U, U_size, hipMemcpyHostToDevice); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_COPY; goto cleanup; }
free(U); U = NULL;
}
// Allocate the memory for the final matrix A
//cudaStat = hipMalloc ((void**)&d_A_inv, A_size); if (hipSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// And perform the final product: A^+ = (V*S_inv) * U^T, written to d_A_inv.
cuda_dgemm('n', 't', n, m, n, 1.0, d_VS, n, d_U, lda, 0.0, d_A_inv, n );
// Copy A_inverse back on host memory
// A_inv[0] = (double*) malloc( A_size);
// cudaStat = hipMemcpy(A_inv[0], d_A, A_size, hipMemcpyDeviceToHost);
// Free device memory and make sure that everyhting is freed!
cleanup:
// if (d_A) hipFree(d_A);
if (d_S) hipFree(d_S);
if (d_S_inv) hipFree(d_S_inv);
if (d_U) hipFree(d_U);
if (d_V) hipFree(d_V);
if (d_VS) hipFree(d_VS);
if (U) free(U);
// hipDeviceReset();
return return_value;
}
| bd5a562b77ff4355d263f746d753eaf4c26cf2f1.cu |
#include "../include/cuda_mpi_double.hpp"
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <iostream>
#include "../include/cuda_matmult_double.hpp"
#include "../include/cuda_svd.hpp"
#include "../include/cuda_svd_double.hpp"
#include "../include/cuda_get_information.hpp"
#include "../include/cuda_helper.hpp"
#define THREADS_PER_BLOCK 512
constexpr size_t size_d = sizeof(double);
// Builds the pseudo-inverse of the diagonal singular-value matrix: writes
// 1/sigma_col on the diagonal entries of d_S_inv and zero everywhere else.
// d_S_inv is treated as column-major with leading dimension num_of_sigma and
// num_of_el total elements. Singular values at or below `cutoff` are treated
// as zero (their inverse entry is set to 0) to regularize the result.
// One thread per output element; extra threads do nothing.
__global__ void cuda_invert_singular_values_device_memory( double *d_S, double *d_S_inv, const double cutoff, const int num_of_sigma, const int num_of_el )
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if ( index < num_of_el )
{
// Column of this element (leading dimension num_of_sigma).
int sigma_index = ( index / num_of_sigma );
if ( sigma_index >= num_of_sigma ) d_S_inv[index] = .0;
else
{
// index = col*num_of_sigma + row is on the diagonal (row == col)
// exactly when (index - col) is a multiple of num_of_sigma.
bool is_sigma = (( index - sigma_index ) % num_of_sigma ) == 0;
if ( is_sigma && d_S[sigma_index] > cutoff ) d_S_inv[index] = ( 1.0 / d_S[sigma_index] );
else d_S_inv[index] = .0;
}
}
}
// Debug helper: copies an m x n device matrix (column-major, leading
// dimension lda >= m) to the host and prints it row by row.
void printDeviceMatrix ( double* M, int m, int n, int lda )
{
// The device buffer holds lda*n elements, and that is how much we copy;
// the original allocated only m*n doubles, overflowing the host buffer
// whenever lda > m.
double *tmp = (double*) malloc( (size_t)lda*n*sizeof(double));
if ( tmp == NULL ) return; // nothing to print if the allocation failed
cudaMemcpy(tmp, M, (size_t)lda*n*sizeof(double),cudaMemcpyDeviceToHost);
for ( int r = 0; r < m; ++r )
{
for ( int c = 0; c < n; ++c ) std::cout << tmp[r+c*lda] << " ";
std::cout << std::endl;
}
free(tmp);
}
// Decides which allocation strategy fits into free GPU memory for an
// lda x n problem with elements of `type_size` bytes.
// Returns 0 if all matrices fit on the device simultaneously, 1 if they fit
// only when S_inv and U share memory (U buffered in host RAM), -1 if even
// that does not fit, or an ERROR_* code on a cusolver failure.
int check_needed_memory ( int lda, int m, int n, size_t type_size )
{
// Calculate the needed space on dRAM if all matrizes are kept on it,
// but are deleted as soon as they are not needed anymore.
long unsigned int requested_matrizes = type_size * n * ( m + 3*n );
// Calculate the needed space on dRAM if S_inv and U share the memory
// since they are the largest matrizes. U will be buffered on main RAM.
long unsigned int reducable_size = type_size * n * ( m + n +1 );
// Additional worksize of cusolverDNDgesvd
cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS;
cusolverDnHandle_t cusolverH = NULL;
int work = 0;
long unsigned int free_i;
size_t free_s, total;
// Create cusolver handle
status = cusolverDnCreate(&cusolverH);
if (CUSOLVER_STATUS_SUCCESS != status ) return ERROR_CUSOLVER_DN_CREATE;
// Querry buffer
status = cusolverDnDgesvd_bufferSize( cusolverH, m, n, &work );
if ( CUSOLVER_STATUS_SUCCESS != status )
{
// Destroy the handle before bailing out (the original leaked it here).
cusolverDnDestroy(cusolverH);
return ERROR_CUDA_BUFFER_SIZE;
}
// Run cudaMemGetInfor here since cuslverDnCreate allocates
// space for the cusolverH
cudaMemGetInfo(&free_s,&total);
if (cusolverH) cusolverDnDestroy(cusolverH);
// bufferSize reports elements; convert to bytes.
work *= type_size;
requested_matrizes += ((long unsigned int) work);
reducable_size += ((long unsigned int) work);
free_i = (long unsigned int) free_s;
// Keep a safety margin of CUDA_SAVE_MEMORY_BUFFER_ bytes in both cases.
if ( free_i > requested_matrizes && (free_i - requested_matrizes) > CUDA_SAVE_MEMORY_BUFFER_ ) return 0;
else if ( free_i > reducable_size && (free_i - reducable_size) > CUDA_SAVE_MEMORY_BUFFER_ ) return 1;
return -1;
}
int cuda_double_moore_penrose_inverse_host_memory( double *A, double **A_inv, int m, int n, int lda, double cutoff )
{
cudaError_t cudaStat = cudaSuccess;
int return_value = 0;
const long unsigned int A_size = get_size_(lda, n , size_d);
const long unsigned int A_i_size = get_size_( m, n , size_d);
double *d_A; // Initial matrix A
double *d_A_inv; // Initial matrix A
cudaStat = cudaMalloc ((void**)&d_A, A_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
cudaStat = cudaMalloc ((void**)&d_A_inv, A_i_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
return_value = cuda_double_moore_penrose_inverse_device_memory( d_A, d_A_inv, m, n, lda, cutoff );
A_inv[0] = (double*) malloc(A_size);
cudaStat = cudaMemcpy(A_inv[0], d_A_inv, A_size, cudaMemcpyDeviceToHost);
cleanup:
if (d_A) cudaFree(d_A);
if (d_A_inv) cudaFree(d_A_inv);
cudaDeviceReset();
return return_value;
}
// Computes the Moore-Penrose pseudo-inverse of the device matrix d_A
// (m x n, leading dimension lda) into d_A_inv via SVD:
//   A^+ = (V * diag(1/sigma_i)) * U^T,
// where singular values <= cutoff are treated as zero. Depending on the
// verdict of check_needed_memory(), U may be staged through host RAM so
// that U and S_inv never occupy device memory at the same time.
// Returns 0 on success or an ERROR_* code; all temporaries are released
// through the single goto-cleanup path.
int cuda_double_moore_penrose_inverse_device_memory( double *d_A, double *d_A_inv, int m, int n, int lda, double cutoff )
{
cudaError_t cudaStat = cudaSuccess;
// Start with checking the expected size of the problem.
// memory_information: 0 = everything fits, 1 = stage U on host, -1 = too big.
int memory_information = check_needed_memory (lda, m, n, sizeof(double));
if ( memory_information == -1 )
{
std::cerr << "ERROR:\tNot enough GPU memory...\n";
return ERROR_CUDA_MALLOC;
}
// Some values for the following calculations
int return_value = 0;
const int S_elements = min(m,n)*n;
const long unsigned int S_size = get_size_( 1, min(m,n), size_d);
const long unsigned int S_i_size = get_size_( n, min(m,n), size_d);
const long unsigned int U_size = get_size_( m, min(m,n), size_d);
const long unsigned int V_size = get_size_( n, min(m,n), size_d);
const long unsigned int A_size = get_size_(lda, n , size_d);
// Device pointers
// double *d_A; // Initial matrix A
double *d_S; // Vector of singular values
double *d_S_inv; // diag[ 1 / sigma_i ]
double *d_U; // SVD matrix U
double *d_V; // SVD matrix V
double *d_VS; // V * diag[1/sigma_i]
double *U; // host staging buffer for U (memory_information == 1 only)
d_S = d_S_inv = d_U = d_V = d_VS = U = NULL;
// Allocate svd memory on GPU
cudaStat = cudaMalloc ((void**)&d_U, U_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// cudaStat = cudaMalloc ((void**)&d_A, A_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
cudaStat = cudaMalloc ((void**)&d_V, V_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
cudaStat = cudaMalloc ((void**)&d_S, S_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
if ( !memory_information )
{
// Plenty of memory: allocate S_inv up front alongside U.
cudaStat = cudaMalloc ((void**)&d_S_inv, S_i_size);
if (cudaSuccess != cudaStat)
{
return_value = ERROR_CUDA_MALLOC;
goto cleanup;
}
}
// Copy initial matrix on GPU
// cudaStat = cudaMemcpy(d_A, A, A_size, cudaMemcpyHostToDevice); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_COPY; goto cleanup; }
// Compute the svd
return_value = cuda_double_svd_device_memory( d_A, d_S, d_U, d_V, m, n, lda );
if ( return_value )
{
std::cout << "svd_error = " << return_value << std::endl;
goto cleanup;
}
if ( memory_information )
{
// Tight memory: park U in host RAM, free its device copy, and reuse
// the freed space for S_inv.
U = (double*) malloc (U_size);
cudaStat = cudaMemcpy(U, d_U, U_size, cudaMemcpyDeviceToHost); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_COPY; goto cleanup; }
// if (d_A) { cudaFree(d_A); d_A = NULL; }
if (d_U) { cudaFree(d_U); d_U = NULL; }
cudaStat = cudaMalloc ((void**)&d_S_inv, S_i_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
}
// Invert the singular values and write them on the diagonal of S_inv
if ( n < THREADS_PER_BLOCK )
cuda_invert_singular_values_device_memory<<<m,n>>> ( d_S, d_S_inv, cutoff, n, S_elements );
else
{
int Number_of_blocks = (S_elements/THREADS_PER_BLOCK+1); // +1 -> else last sigma might be skiped!
cuda_invert_singular_values_device_memory<<<Number_of_blocks,THREADS_PER_BLOCK>>> ( d_S, d_S_inv, cutoff, n, S_elements );
}
// Free the vector S
if (d_S) { cudaFree(d_S); d_S = NULL; }
// Allocate the memory for the product V*S_inv
cudaStat = cudaMalloc ((void**)&d_VS, V_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// Compute the product
// If "small svd is used we have to choose 't' for matrix d_V!
cuda_dgemm('t', 'n', n, n, n, 1.0, d_V, n, d_S_inv, n, 0.0, d_VS, n );
// Free V since it is not needed anymore
if (d_V) { cudaFree(d_V); d_V = NULL; }
if (d_S_inv) { cudaFree(d_S_inv); d_S_inv = NULL; }
// If U was shifted to main RAM write the respective values back
// in initial dRAM and remove S_inv
if ( memory_information)
{
cudaStat = cudaMalloc ((void**)&d_U, U_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// cudaStat = cudaMalloc ((void**)&d_A, U_size); if (cudaSuccess != cudaStat) return ERROR_CUDA_MALLOC;
cudaStat = cudaMemcpy(d_U, U, U_size, cudaMemcpyHostToDevice); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_COPY; goto cleanup; }
free(U); U = NULL;
}
// Allocate the memory for the final matrix A
//cudaStat = cudaMalloc ((void**)&d_A_inv, A_size); if (cudaSuccess != cudaStat) { return_value = ERROR_CUDA_MALLOC; goto cleanup; }
// And perform the final product: A^+ = (V*S_inv) * U^T, written to d_A_inv.
cuda_dgemm('n', 't', n, m, n, 1.0, d_VS, n, d_U, lda, 0.0, d_A_inv, n );
// Copy A_inverse back on host memory
// A_inv[0] = (double*) malloc( A_size);
// cudaStat = cudaMemcpy(A_inv[0], d_A, A_size, cudaMemcpyDeviceToHost);
// Free device memory and make sure that everyhting is freed!
cleanup:
// if (d_A) cudaFree(d_A);
if (d_S) cudaFree(d_S);
if (d_S_inv) cudaFree(d_S_inv);
if (d_U) cudaFree(d_U);
if (d_V) cudaFree(d_V);
if (d_VS) cudaFree(d_VS);
if (U) free(U);
// cudaDeviceReset();
return return_value;
}
|
929a0bbad27d293ee6785ed30d7490c4fa9bb562.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "kernels/reduce.cuh"
#include "bfs_push_cuda.cuh"
static const int __tb_FirstItr_BFS = TB_SIZE;
static const int __tb_BFS = TB_SIZE;
// Grid-stride initialization for BFS: every node in [__begin, __end) gets
// distance 0 if its node_data matches local_src_node, otherwise
// local_infinity — written to both the current and the previous-round
// distance arrays.
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, unsigned long long local_src_node, uint32_t * p_dist_current, uint32_t * p_dist_old)
{
  const unsigned stride = TOTAL_THREADS_1D;
  for (index_type src = __begin + TID_1D; src < __end; src += stride)
  {
    const uint32_t init_dist = (graph.node_data[src] == local_src_node) ? 0 : local_infinity;
    p_dist_current[src] = init_dist;
    p_dist_old[src] = init_dist;
  }
}
// First BFS round, push style: for each node in [__begin, __end) snapshot
// dist_current into dist_old, then relax every outgoing edge
// (dist[dst] = min(dist[dst], dist[src] + 1) via atomicTestMin) and mark
// updated destinations in bitset_dist_current for later host/device sync.
// Edge work is load-balanced by three nested-parallelism schedulers:
// whole-block (tb) for degree >= TB_SIZE, per-warp (wp) for degree >= 32,
// and fine-grained (fg) for the rest. Code generated by the Galois/IrGL
// compiler; the "// FP:" markers are its own trace annotations, and the
// exact statement/barrier ordering must be preserved.
__global__ void FirstItr_BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_FirstItr_BFS;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
// Round the iteration bound up to a block multiple so every thread reaches
// the collective operations (scan, __syncthreads) even past __end.
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
p_dist_old[src] = p_dist_current[src];
}
// FP: "10 -> 11;
// FP: "13 -> 14;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "14 -> 15;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "15 -> 16;
_np_closure[threadIdx.x].src = src;
// FP: "16 -> 17;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "19 -> 20;
// FP: "20 -> 21;
// el[0]: edges to process at tb/wp granularity; el[1]: fine-grained edges.
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "21 -> 22;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "22 -> 23;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "25 -> 26;
__syncthreads();
// FP: "26 -> 27;
// Thread-block scheduler: the whole block cooperates on one high-degree
// node at a time until no thread claims ownership.
while (true)
{
// FP: "27 -> 28;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
break;
}
// FP: "35 -> 36;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "38 -> 39;
__syncthreads();
// FP: "39 -> 40;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "40 -> 41;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "43 -> 44;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "44 -> 45;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "57 -> 58;
__syncthreads();
}
// FP: "59 -> 60;
// FP: "60 -> 61;
// Warp scheduler: each warp cooperates on one medium-degree node at a time.
{
const int warpid = threadIdx.x / 32;
// FP: "61 -> 62;
const int _np_laneid = cub::LaneId();
// FP: "62 -> 63;
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "85 -> 86;
__syncthreads();
// FP: "86 -> 87;
}
// FP: "87 -> 88;
__syncthreads();
// FP: "88 -> 89;
// Fine-grained scheduler: remaining (low-degree) edges are redistributed
// evenly across the block via the exclusive scan offsets.
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "89 -> 90;
while (_np.work())
{
// FP: "90 -> 91;
int _np_i =0;
// FP: "91 -> 92;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
// FP: "94 -> 95;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "108 -> 109;
_np.execute_round_done(ITSIZE);
// FP: "109 -> 110;
__syncthreads();
}
// FP: "111 -> 112;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "113 -> 114;
}
__global__ void BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_priority, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current, HGAccumulator<unsigned int> DGAccumulator_accum, HGAccumulator<unsigned int> work_items)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_BFS;
__shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
__shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage work_items_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
work_items.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_dist_old[src] > p_dist_current[src])
{
DGAccumulator_accum.reduce( 1);
if (local_priority > p_dist_current[src])
{
p_dist_old[src] = p_dist_current[src];
work_items.reduce( 1);
}
else
{
pop = false;
}
}
else
{
pop = false;
}
}
// FP: "15 -> 16;
// FP: "18 -> 19;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "19 -> 20;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "20 -> 21;
_np_closure[threadIdx.x].src = src;
// FP: "21 -> 22;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "24 -> 25;
// FP: "25 -> 26;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "26 -> 27;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "27 -> 28;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
while (true)
{
// FP: "32 -> 33;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "35 -> 36;
__syncthreads();
// FP: "36 -> 37;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
break;
}
// FP: "40 -> 41;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "43 -> 44;
__syncthreads();
// FP: "44 -> 45;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "45 -> 46;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "48 -> 49;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "49 -> 50;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "62 -> 63;
__syncthreads();
}
// FP: "64 -> 65;
// FP: "65 -> 66;
{
const int warpid = threadIdx.x / 32;
// FP: "66 -> 67;
const int _np_laneid = cub::LaneId();
// FP: "67 -> 68;
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "90 -> 91;
__syncthreads();
// FP: "91 -> 92;
}
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "94 -> 95;
while (_np.work())
{
// FP: "95 -> 96;
int _np_i =0;
// FP: "96 -> 97;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "97 -> 98;
__syncthreads();
// FP: "98 -> 99;
// FP: "99 -> 100;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "113 -> 114;
_np.execute_round_done(ITSIZE);
// FP: "114 -> 115;
__syncthreads();
}
// FP: "116 -> 117;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "119 -> 120;
DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
work_items.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(work_items_ts);
// FP: "120 -> 121;
}
// Device-side sanity check of BFS results over nodes [__begin, __end):
// counts nodes with a finite distance (< local_infinity) into
// DGAccumulator_sum and tracks the largest finite distance in DGMax.
// Grid-stride loop, one node per thread; thread_exit performs the
// per-block reduction using the shared temp storage declared below.
__global__ void BFSSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_dist_current, HGAccumulator<uint64_t> DGAccumulator_sum, HGReduceMax<uint32_t> DGMax)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
// Scratch space for the block-wide reductions done in thread_exit.
__shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
__shared__ hipcub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGMax_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_sum.thread_entry();
// FP: "3 -> 4;
// FP: "4 -> 5;
DGMax.thread_entry();
// FP: "5 -> 6;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
// Only nodes actually reached by BFS contribute to the statistics.
if (p_dist_current[src] < local_infinity)
{
DGAccumulator_sum.reduce( 1);
DGMax.reduce(p_dist_current[src]);
}
}
}
// FP: "14 -> 15;
DGAccumulator_sum.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_sum_ts);
// FP: "15 -> 16;
DGMax.thread_exit<hipcub::BlockReduce<uint32_t, TB_SIZE> >(DGMax_ts);
// FP: "16 -> 17;
}
// Host launcher: initializes dist_current/dist_old for nodes in
// [__begin, __end) — 0 for the source node, local_infinity elsewhere
// (see the InitializeGraph kernel).
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_infinity, local_src_node, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr());
  check_cuda_kernel;
}
// Convenience overload: initializes every node of the local graph.
void InitializeGraph_allNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->gg.nnodes, local_infinity, local_src_node, ctx);
}
// Convenience overload: initializes only this host's master-node range.
void InitializeGraph_masterNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, local_src_node, ctx);
}
// Convenience overload: initializes the prefix of nodes that have edges.
void InitializeGraph_nodesWithEdges_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_infinity, local_src_node, ctx);
}
// Host launcher for the first BFS relaxation round over [__begin, __end).
// The block size is fixed at __tb_FirstItr_BFS because the kernel's
// load-balancing scheduler is compiled against that size; kernel_sizing
// still picks the grid dimensions.
void FirstItr_BFS_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  hipLaunchKernelGGL(( FirstItr_BFS) , dim3(blocks), dim3(__tb_FirstItr_BFS), 0, 0, ctx->gg, __begin, __end, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Convenience overload: first BFS round over every node.
void FirstItr_BFS_allNodes_cuda(struct CUDA_Context* ctx)
{
  FirstItr_BFS_cuda(0, ctx->gg.nnodes, ctx);
}
// Convenience overload: first BFS round over this host's master nodes.
void FirstItr_BFS_masterNodes_cuda(struct CUDA_Context* ctx)
{
  FirstItr_BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
}
// Convenience overload: first BFS round over nodes that have edges.
void FirstItr_BFS_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  FirstItr_BFS_cuda(0, ctx->numNodesWithEdges, ctx);
}
// Host launcher for one data-driven BFS round over [__begin, __end).
// On return, DGAccumulator_accum holds the count of nodes whose dist_old
// exceeded dist_current, and work_items the count of those that also
// passed the local_priority cutoff (see the BFS kernel).
void BFS_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  // Device-visible counters backing the two accumulator handles.
  Shared<unsigned int> accum_buf = Shared<unsigned int>(1);
  *(accum_buf.cpu_wr_ptr()) = 0;
  HGAccumulator<unsigned int> _DGAccumulator_accum;
  _DGAccumulator_accum.rv = accum_buf.gpu_wr_ptr();
  Shared<unsigned int> work_buf = Shared<unsigned int>(1);
  *(work_buf.cpu_wr_ptr()) = 0;
  HGAccumulator<unsigned int> _work_items;
  _work_items.rv = work_buf.gpu_wr_ptr();
  hipLaunchKernelGGL(( BFS) , dim3(blocks), dim3(__tb_BFS), 0, 0, ctx->gg, __begin, __end, local_priority, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()), _DGAccumulator_accum, _work_items);
  check_cuda_kernel;
  // Copy the reduced results back to the host out-parameters.
  DGAccumulator_accum = *(accum_buf.cpu_rd_ptr());
  work_items = *(work_buf.cpu_rd_ptr());
}
// Convenience overload: one BFS round over every node.
void BFS_allNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  BFS_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, work_items, local_priority, ctx);
}
// Convenience overload: one BFS round over this host's master nodes.
void BFS_masterNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, work_items, local_priority, ctx);
}
// Convenience overload: one BFS round over nodes that have edges.
void BFS_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  BFS_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, work_items, local_priority, ctx);
}
// Host launcher for the BFS sanity-check kernel over [__begin, __end).
// Returns (via references) the number of reached nodes and the maximum
// finite distance found.
void BFSSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  // Device-visible scalars backing the two reduction handles.
  Shared<uint64_t> sum_buf = Shared<uint64_t>(1);
  *(sum_buf.cpu_wr_ptr()) = 0;
  HGAccumulator<uint64_t> _DGAccumulator_sum;
  _DGAccumulator_sum.rv = sum_buf.gpu_wr_ptr();
  Shared<uint32_t> max_buf = Shared<uint32_t>(1);
  *(max_buf.cpu_wr_ptr()) = 0;
  HGReduceMax<uint32_t> _DGMax;
  _DGMax.rv = max_buf.gpu_wr_ptr();
  hipLaunchKernelGGL(( BFSSanityCheck) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_infinity, ctx->dist_current.data.gpu_wr_ptr(), _DGAccumulator_sum, _DGMax);
  check_cuda_kernel;
  // Copy the reduced results back to the host out-parameters.
  DGAccumulator_sum = *(sum_buf.cpu_rd_ptr());
  DGMax = *(max_buf.cpu_rd_ptr());
}
// Convenience overload: sanity check over every node.
void BFSSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  BFSSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_sum, DGMax, local_infinity, ctx);
}
// Convenience overload: sanity check over this host's master nodes.
void BFSSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  BFSSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_sum, DGMax, local_infinity, ctx);
}
// Convenience overload: sanity check over nodes that have edges.
void BFSSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  BFSSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_sum, DGMax, local_infinity, ctx);
}
| 929a0bbad27d293ee6785ed30d7490c4fa9bb562.cu | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "kernels/reduce.cuh"
#include "bfs_push_cuda.cuh"
static const int __tb_FirstItr_BFS = TB_SIZE;
static const int __tb_BFS = TB_SIZE;
// Initialization kernel: for every node in [__begin, __end) sets both
// dist_current and dist_old to 0 if the node's global id matches
// local_src_node, and to local_infinity otherwise. Grid-stride loop,
// one node per thread.
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, unsigned long long local_src_node, uint32_t * p_dist_current, uint32_t * p_dist_old)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_dist_current[src] = (graph.node_data[src] == local_src_node) ? 0 : local_infinity;
p_dist_old[src] = (graph.node_data[src] == local_src_node) ? 0 : local_infinity;
}
}
// FP: "8 -> 9;
}
// First BFS round: for every node in [__begin, __end), snapshot
// dist_current into dist_old, then relax all outgoing edges with
// atomicTestMin, marking lowered destinations in bitset_dist_current.
// Edge work is load-balanced over three nested-parallelism phases:
// whole-block (degree >= _NP_CROSSOVER_TB), per-warp (degree >=
// _NP_CROSSOVER_WP) and fine-grained shared-memory worklists for the
// rest. The loop bound src_rup is rounded up to a multiple of
// blockDim.x so every thread of a block reaches the __syncthreads()
// barriers below, even past __end.
__global__ void FirstItr_BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_FirstItr_BFS;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
p_dist_old[src] = p_dist_current[src];
}
// FP: "10 -> 11;
// FP: "13 -> 14;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "14 -> 15;
// Per-thread closure: remembers each thread's node so whichever
// thread/warp/block later drains its edges can recover it.
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "15 -> 16;
_np_closure[threadIdx.x].src = src;
// FP: "16 -> 17;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "19 -> 20;
// FP: "20 -> 21;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "21 -> 22;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "22 -> 23;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "25 -> 26;
__syncthreads();
// FP: "26 -> 27;
// Phase 1 (block-level): high-degree nodes are drained one at a time
// by the entire thread block.
while (true)
{
// FP: "27 -> 28;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
break;
}
// FP: "35 -> 36;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "38 -> 39;
__syncthreads();
// FP: "39 -> 40;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "40 -> 41;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "43 -> 44;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "44 -> 45;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "57 -> 58;
__syncthreads();
}
// FP: "59 -> 60;
// FP: "60 -> 61;
// Phase 2 (warp-level): medium-degree nodes are drained warp by warp.
{
const int warpid = threadIdx.x / 32;
// FP: "61 -> 62;
const int _np_laneid = cub::LaneId();
// FP: "62 -> 63;
// NOTE(review): mask-less __any is legacy and was removed for compute
// capability 7.0+; __any_sync(0xffffffff, ...) is the modern form —
// confirm the target architectures this is built for.
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "85 -> 86;
__syncthreads();
// FP: "86 -> 87;
}
// FP: "87 -> 88;
__syncthreads();
// FP: "88 -> 89;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "89 -> 90;
// Phase 3 (fine-grained): remaining low-degree edge work is scattered
// into shared-memory item lists and processed ITSIZE entries per round.
while (_np.work())
{
// FP: "90 -> 91;
int _np_i =0;
// FP: "91 -> 92;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
// FP: "94 -> 95;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "108 -> 109;
_np.execute_round_done(ITSIZE);
// FP: "109 -> 110;
__syncthreads();
}
// FP: "111 -> 112;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "113 -> 114;
}
// One data-driven BFS round: a node in [__begin, __end) is active when
// dist_old > dist_current; active nodes increment DGAccumulator_accum.
// Those whose dist_current is below the local_priority cutoff commit
// (dist_old := dist_current, work_items += 1) and relax their outgoing
// edges with atomicTestMin, marking lowered destinations in
// bitset_dist_current; the rest are deferred to a later round.
// Edge work uses the same three-phase nested-parallelism scheduler as
// FirstItr_BFS (block / warp / fine-grained); src_rup keeps all threads
// of a block in the loop so the __syncthreads() barriers are uniform.
__global__ void BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_priority, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current, HGAccumulator<unsigned int> DGAccumulator_accum, HGAccumulator<unsigned int> work_items)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_BFS;
// Scratch space for the block-wide reductions done in thread_exit.
__shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
__shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage work_items_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
work_items.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_dist_old[src] > p_dist_current[src])
{
DGAccumulator_accum.reduce( 1);
// Priority cutoff: only sufficiently-close nodes do work this round.
if (local_priority > p_dist_current[src])
{
p_dist_old[src] = p_dist_current[src];
work_items.reduce( 1);
}
else
{
pop = false;
}
}
else
{
pop = false;
}
}
// FP: "15 -> 16;
// FP: "18 -> 19;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "19 -> 20;
// Per-thread closure: remembers each thread's node so whichever
// thread/warp/block later drains its edges can recover it.
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "20 -> 21;
_np_closure[threadIdx.x].src = src;
// FP: "21 -> 22;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "24 -> 25;
// FP: "25 -> 26;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "26 -> 27;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "27 -> 28;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
// Phase 1 (block-level): high-degree nodes are drained one at a time
// by the entire thread block.
while (true)
{
// FP: "32 -> 33;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "35 -> 36;
__syncthreads();
// FP: "36 -> 37;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
break;
}
// FP: "40 -> 41;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "43 -> 44;
__syncthreads();
// FP: "44 -> 45;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "45 -> 46;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "48 -> 49;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "49 -> 50;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "62 -> 63;
__syncthreads();
}
// FP: "64 -> 65;
// FP: "65 -> 66;
// Phase 2 (warp-level): medium-degree nodes are drained warp by warp.
{
const int warpid = threadIdx.x / 32;
// FP: "66 -> 67;
const int _np_laneid = cub::LaneId();
// FP: "67 -> 68;
// NOTE(review): mask-less __any is legacy and was removed for compute
// capability 7.0+; __any_sync(0xffffffff, ...) is the modern form —
// confirm the target architectures this is built for.
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "90 -> 91;
__syncthreads();
// FP: "91 -> 92;
}
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "94 -> 95;
// Phase 3 (fine-grained): remaining low-degree edge work is scattered
// into shared-memory item lists and processed ITSIZE entries per round.
while (_np.work())
{
// FP: "95 -> 96;
int _np_i =0;
// FP: "96 -> 97;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "97 -> 98;
__syncthreads();
// FP: "98 -> 99;
// FP: "99 -> 100;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "113 -> 114;
_np.execute_round_done(ITSIZE);
// FP: "114 -> 115;
__syncthreads();
}
// FP: "116 -> 117;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "119 -> 120;
DGAccumulator_accum.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
work_items.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(work_items_ts);
// FP: "120 -> 121;
}
// Device-side sanity check of BFS results over nodes [__begin, __end):
// counts nodes with a finite distance (< local_infinity) into
// DGAccumulator_sum and tracks the largest finite distance in DGMax.
// Grid-stride loop, one node per thread; thread_exit performs the
// per-block cub::BlockReduce using the shared temp storage below.
__global__ void BFSSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_dist_current, HGAccumulator<uint64_t> DGAccumulator_sum, HGReduceMax<uint32_t> DGMax)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ cub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
__shared__ cub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGMax_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_sum.thread_entry();
// FP: "3 -> 4;
// FP: "4 -> 5;
DGMax.thread_entry();
// FP: "5 -> 6;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
// Only nodes actually reached by BFS contribute to the statistics.
if (p_dist_current[src] < local_infinity)
{
DGAccumulator_sum.reduce( 1);
DGMax.reduce(p_dist_current[src]);
}
}
}
// FP: "14 -> 15;
DGAccumulator_sum.thread_exit<cub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_sum_ts);
// FP: "15 -> 16;
DGMax.thread_exit<cub::BlockReduce<uint32_t, TB_SIZE> >(DGMax_ts);
// FP: "16 -> 17;
}
// Host launcher: initializes dist_current/dist_old for nodes in
// [__begin, __end) — 0 for the source node, local_infinity elsewhere
// (see the InitializeGraph kernel).
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  InitializeGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, local_infinity, local_src_node, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr());
  check_cuda_kernel;
}
// Convenience overload: initializes every node of the local graph.
void InitializeGraph_allNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->gg.nnodes, local_infinity, local_src_node, ctx);
}
// Convenience overload: initializes only this host's master-node range.
void InitializeGraph_masterNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, local_src_node, ctx);
}
// Convenience overload: initializes the prefix of nodes that have edges.
void InitializeGraph_nodesWithEdges_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_infinity, local_src_node, ctx);
}
// Host launcher for the first BFS relaxation round over [__begin, __end).
// The block size is fixed at __tb_FirstItr_BFS because the kernel's
// load-balancing scheduler is compiled against that size; kernel_sizing
// still picks the grid dimensions.
void FirstItr_BFS_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  FirstItr_BFS <<<blocks, __tb_FirstItr_BFS>>>(ctx->gg, __begin, __end, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Convenience overload: first BFS round over every node.
void FirstItr_BFS_allNodes_cuda(struct CUDA_Context* ctx)
{
  FirstItr_BFS_cuda(0, ctx->gg.nnodes, ctx);
}
// Convenience overload: first BFS round over this host's master nodes.
void FirstItr_BFS_masterNodes_cuda(struct CUDA_Context* ctx)
{
  FirstItr_BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
}
// Convenience overload: first BFS round over nodes that have edges.
void FirstItr_BFS_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  FirstItr_BFS_cuda(0, ctx->numNodesWithEdges, ctx);
}
// Host launcher for one data-driven BFS round over [__begin, __end).
// On return, DGAccumulator_accum holds the count of nodes whose dist_old
// exceeded dist_current, and work_items the count of those that also
// passed the local_priority cutoff (see the BFS kernel).
void BFS_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  // Device-visible counters backing the two accumulator handles.
  Shared<unsigned int> accum_buf = Shared<unsigned int>(1);
  *(accum_buf.cpu_wr_ptr()) = 0;
  HGAccumulator<unsigned int> _DGAccumulator_accum;
  _DGAccumulator_accum.rv = accum_buf.gpu_wr_ptr();
  Shared<unsigned int> work_buf = Shared<unsigned int>(1);
  *(work_buf.cpu_wr_ptr()) = 0;
  HGAccumulator<unsigned int> _work_items;
  _work_items.rv = work_buf.gpu_wr_ptr();
  BFS <<<blocks, __tb_BFS>>>(ctx->gg, __begin, __end, local_priority, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()), _DGAccumulator_accum, _work_items);
  check_cuda_kernel;
  // Copy the reduced results back to the host out-parameters.
  DGAccumulator_accum = *(accum_buf.cpu_rd_ptr());
  work_items = *(work_buf.cpu_rd_ptr());
}
// Convenience overload: one BFS round over every node.
void BFS_allNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  BFS_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, work_items, local_priority, ctx);
}
// Convenience overload: one BFS round over this host's master nodes.
void BFS_masterNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, work_items, local_priority, ctx);
}
// Convenience overload: one BFS round over nodes that have edges.
void BFS_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
  BFS_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, work_items, local_priority, ctx);
}
// Host launcher for the BFS sanity-check kernel over [__begin, __end).
// Returns (via references) the number of reached nodes and the maximum
// finite distance found.
void BFSSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  dim3 blocks, threads;
  kernel_sizing(blocks, threads);
  // Device-visible scalars backing the two reduction handles.
  Shared<uint64_t> sum_buf = Shared<uint64_t>(1);
  *(sum_buf.cpu_wr_ptr()) = 0;
  HGAccumulator<uint64_t> _DGAccumulator_sum;
  _DGAccumulator_sum.rv = sum_buf.gpu_wr_ptr();
  Shared<uint32_t> max_buf = Shared<uint32_t>(1);
  *(max_buf.cpu_wr_ptr()) = 0;
  HGReduceMax<uint32_t> _DGMax;
  _DGMax.rv = max_buf.gpu_wr_ptr();
  BFSSanityCheck <<<blocks, threads>>>(ctx->gg, __begin, __end, local_infinity, ctx->dist_current.data.gpu_wr_ptr(), _DGAccumulator_sum, _DGMax);
  check_cuda_kernel;
  // Copy the reduced results back to the host out-parameters.
  DGAccumulator_sum = *(sum_buf.cpu_rd_ptr());
  DGMax = *(max_buf.cpu_rd_ptr());
}
// Convenience overload: sanity check over every node.
void BFSSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  BFSSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_sum, DGMax, local_infinity, ctx);
}
// Convenience overload: sanity check over this host's master nodes.
void BFSSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  BFSSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_sum, DGMax, local_infinity, ctx);
}
// Convenience overload: sanity check over nodes that have edges.
void BFSSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  BFSSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_sum, DGMax, local_infinity, ctx);
}
|
dae7201353f930077f2c51caa92dc28813f8b53e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the set calculus toolbox scheme, i.e., to calculate gradients,normal vectors,
* curvatures, Heaviside function and Dirac_Delta function
******************************************************************************/
#include "shared_utilities.cuh"
#include "shared_utilities.cup"
/* Per-voxel differential-geometry quantities for the curve where the zero
 * level sets of a primary function F and an auxiliary function A intersect.
 * F's derivatives (Fx..Fzx), gradient magnitude (FGradMag) and unit normal
 * (Nx,Ny,Nz) arrive precomputed; A's derivatives are formed here from lsf
 * with central differences on a uniform grid (spacings dx, dy, dz; ds is
 * used for the mixed second differences).
 * Outputs: gradient of A and its magnitude, C = grad(F) x grad(A) and |C|,
 * unit tangent T = C/|C|, n = T x N, second derivatives / Laplacian of A,
 * geodesic and normal curvature, geodesic torsion, BPerpendicular, and
 * Heaviside / DiracDelta of A recovered from the primal AHPrimal.
 * Launch: one thread per (row, col, page) grid point; out-of-range threads
 * return immediately.  num_ele is unused.
 * NOTE(review): neighbor indices use row_idx+-1 etc. with no boundary
 * clamping -- presumably sub2ind (shared_utilities) handles the domain
 * boundary or callers exclude it; confirm.  Divisions by NormCrossAF and
 * fGradMag are unguarded, so voxels where grad(F) and grad(A) align (or
 * grad(F) vanishes) yield inf/NaN. */
__global__
void auxi_set_calculus_toolbox(double * Ax, double * Ay, double * Az, double * AGradMag, double * ACx, double * ACy, double * ACz, double * ANormCrossAF, double * Tx, double * Ty, double * Tz, double * Anx, double * Any, double * Anz, double * Axx, double * Ayy, double * Azz, double * Axy, double * Ayz, double * Azx, double * ALaplacian, double * GeodesicCurvature, double * NormalCurvature, double * GeodesicTorsion, double * BPerpendicular, double * AHeaviside, double * ADiracDelta, double const * lsf, double const * AHPrimal, double const * Fx, double const * Fy, double const * Fz, double const * FGradMag, double const * Nx, double const * Ny, double const * Nz, double const * Fxx, double const * Fyy, double const * Fzz, double const * Fxy, double const * Fyz, double const * Fzx, int rows, int cols, int pges, double dx, double dy, double dz, double ds, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
/* flat indices of this voxel and its 6 face neighbors (see boundary note) */
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
/* first derivatives of A: central differences over the face neighbors */
double ax = (lsf[right] - lsf[left]) / (2*dx);
double ay = (lsf[front] - lsf[back]) / (2*dy);
double az = (lsf[up] - lsf[down]) / (2*dz);
double aGradMag = norm(ax, ay, az);
Ax[ind] = ax;
Ay[ind] = ay;
Az[ind] = az;
AGradMag[ind] = aGradMag;
double fx = Fx[ind];
double fy = Fy[ind];
double fz = Fz[ind];
/* C = grad(F) x grad(A); |C| vanishes where the two gradients align */
double Cx, Cy, Cz;
cross_product(Cx,Cy,Cz,fx,fy,fz,ax,ay,az);
double NormCrossAF = norm(Cx,Cy,Cz);
ACx[ind] = Cx;
ACy[ind] = Cy;
ACz[ind] = Cz;
ANormCrossAF[ind] = NormCrossAF;
/* unit tangent of the intersection curve */
double tx = Cx / NormCrossAF;
double ty = Cy / NormCrossAF;
double tz = Cz / NormCrossAF;
Tx[ind] = tx;
Ty[ind] = ty;
Tz[ind] = tz;
double fNx = Nx[ind];
double fNy = Ny[ind];
double fNz = Nz[ind];
/* n = T x N: completes the (T, n, N) frame along the curve */
double nx, ny, nz;
cross_product(nx,ny,nz,tx,ty,tz,fNx,fNy,fNz);
Anx[ind] = nx;
Any[ind] = ny;
Anz[ind] = nz;
/* diagonal neighbors used by the 4-point mixed second-difference stencils */
int front_right = sub2ind(row_idx+1, col_idx+1, pge_idx, rows, cols, pges);
int back_left = sub2ind(row_idx-1, col_idx-1, pge_idx, rows, cols, pges);
int back_right = sub2ind(row_idx-1, col_idx+1, pge_idx, rows, cols, pges);
int front_left = sub2ind(row_idx+1, col_idx-1, pge_idx, rows, cols, pges);
int front_up = sub2ind(row_idx+1, col_idx, pge_idx+1, rows, cols, pges);
int back_down = sub2ind(row_idx-1, col_idx, pge_idx-1, rows, cols, pges);
int front_down = sub2ind(row_idx+1, col_idx, pge_idx-1, rows, cols, pges);
int back_up = sub2ind(row_idx-1, col_idx, pge_idx+1, rows, cols, pges);
int right_up = sub2ind(row_idx, col_idx+1, pge_idx+1, rows, cols, pges);
int left_down = sub2ind(row_idx, col_idx-1, pge_idx-1, rows, cols, pges);
int right_down = sub2ind(row_idx, col_idx+1, pge_idx-1, rows, cols, pges);
int left_up = sub2ind(row_idx, col_idx-1, pge_idx+1, rows, cols, pges);
/* second derivatives of A: 3-point axial and 4-point diagonal stencils
   (mixed terms divide by ds*ds, not the per-axis spacings -- presumably
   the grid is isotropic with dx == dy == dz == ds; confirm) */
double axx = (lsf[right] - 2*lsf[ind] + lsf[left]) / (dx*dx);
double ayy = (lsf[front] - 2*lsf[ind] + lsf[back]) / (dy*dy);
double azz = (lsf[up] - 2*lsf[ind] + lsf[down]) / (dz*dz);
double axy = (lsf[front_right]+lsf[back_left]-lsf[front_left]-lsf[back_right]) / (4*ds*ds);
double ayz = (lsf[front_up]+lsf[back_down]-lsf[front_down]-lsf[back_up]) / (4*ds*ds);
double azx = (lsf[right_up]+lsf[left_down]-lsf[right_down]-lsf[left_up]) / (4*ds*ds);
double aLaplacian = axx + ayy + azz;
Axx[ind] = axx;
Ayy[ind] = ayy;
Azz[ind] = azz;
Axy[ind] = axy;
Ayz[ind] = ayz;
Azx[ind] = azx;
ALaplacian[ind] = aLaplacian;
// geodesic curvature
double fxx = Fxx[ind];
double fyy = Fyy[ind];
double fzz = Fzz[ind];
double fxy = Fxy[ind];
double fyz = Fyz[ind];
double fzx = Fzx[ind];
double fGradMag = FGradMag[ind];
/* v = Hess(F) . t, then w1 = v x grad(A) */
double vx = tx*fxx + ty*fxy + tz*fzx;
double vy = tx*fxy + ty*fyy + tz*fyz;
double vz = tx*fzx + ty*fyz + tz*fzz;
double w1x, w1y, w1z;
cross_product(w1x,w1y,w1z,vx,vy,vz,ax,ay,az);
/* v = Hess(A) . t, then w2 = grad(F) x v */
vx = tx*axx + ty*axy + tz*azx;
vy = tx*axy + ty*ayy + tz*ayz;
vz = tx*azx + ty*ayz + tz*azz;
double w2x, w2y, w2z;
cross_product(w2x,w2y,w2z,fx,fy,fz,vx,vy,vz);
GeodesicCurvature[ind] = ( nx*(w1x+w2x) + ny*(w1y+w2y) + nz*(w1z+w2z) ) / NormCrossAF;
/* NormalCurvature, GeodesicTorsion, BPerpendicular */
/* Nij below is the Jacobian of F's unit normal grad(F)/|grad(F)|,
   expanded via the quotient rule */
double Nxx = fxx / fGradMag - fx*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
double Nyx = fxy / fGradMag - fy*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
double Nzx = fzx / fGradMag - fz*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
double Nxy = fxy / fGradMag - fx*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
double Nyy = fyy / fGradMag - fy*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
double Nzy = fyz / fGradMag - fz*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
double Nxz = fzx / fGradMag - fx*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
double Nyz = fyz / fGradMag - fy*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
double Nzz = fzz / fGradMag - fz*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
// NormalCurvature.
/* kappa_n = - t . (DN t) */
vx = Nxx * tx + Nxy * ty + Nxz * tz;
vy = Nyx * tx + Nyy * ty + Nyz * tz;
vz = Nzx * tx + Nzy * ty + Nzz * tz;
NormalCurvature[ind] = - (tx*vx + ty*vy + tz*vz);
// GeodesicTorsion, BPerpendicular
/* tau_g = - t . (DN n);  b_perp = - n . (DN n) */
vx = Nxx * nx + Nxy * ny + Nxz * nz;
vy = Nyx * nx + Nyy * ny + Nyz * nz;
vz = Nzx * nx + Nzy * ny + Nzz * nz;
GeodesicTorsion[ind] = - (tx*vx + ty*vy + tz*vz);
BPerpendicular[ind] = - (nx*vx + ny*vy + nz*vz);
/*primal of Heaviside(A), Heaviside(A), DiracDelta(A)*/
// calculate Heaviside function
/* H(A) = grad(AHPrimal) . grad(A) / |grad(A)|^2 */
double px = (AHPrimal[right] - AHPrimal[left]) / (2*dx);
double py = (AHPrimal[front] - AHPrimal[back]) / (2*dy);
double pz = (AHPrimal[up] - AHPrimal[down]) / (2*dz);
double dot_DAHPrimal_DF = px*ax + py*ay + pz*az;
AHeaviside[ind] = dot_DAHPrimal_DF / pow(aGradMag,2);
// calculate DiraDelta function
/* delta(A) = Lap(P)/|grad A|^2 - (grad P . grad A) * Lap(A)/|grad A|^4 */
double pxx = (AHPrimal[right] - 2*AHPrimal[ind] +AHPrimal[left]) / (dx*dx);
double pyy = (AHPrimal[front] - 2*AHPrimal[ind] + AHPrimal[back]) / (dy*dy);
double pzz = (AHPrimal[up] - 2*AHPrimal[ind] + AHPrimal[down]) / (dz*dz);
double pLaplacian = pxx + pyy + pzz;
ADiracDelta[ind] = pLaplacian/pow(aGradMag,2) - dot_DAHPrimal_DF*aLaplacian/pow(aGradMag,4);
}
| dae7201353f930077f2c51caa92dc28813f8b53e.cu | /*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the set calculus toolbox scheme, i.e., to calculate gradients,normal vectors,
* curvatures, Heaviside function and Dirac_Delta function
******************************************************************************/
#include "shared_utilities.cuh"
#include "shared_utilities.cup"
/* Per-voxel differential-geometry quantities for the curve where the zero
 * level sets of a primary function F and an auxiliary function A intersect.
 * F's derivatives (Fx..Fzx), gradient magnitude (FGradMag) and unit normal
 * (Nx,Ny,Nz) arrive precomputed; A's derivatives are formed here from lsf
 * with central differences on a uniform grid (spacings dx, dy, dz; ds is
 * used for the mixed second differences).
 * Outputs: gradient of A and its magnitude, C = grad(F) x grad(A) and |C|,
 * unit tangent T = C/|C|, n = T x N, second derivatives / Laplacian of A,
 * geodesic and normal curvature, geodesic torsion, BPerpendicular, and
 * Heaviside / DiracDelta of A recovered from the primal AHPrimal.
 * Launch: one thread per (row, col, page) grid point; out-of-range threads
 * return immediately.  num_ele is unused.
 * NOTE(review): neighbor indices use row_idx+-1 etc. with no boundary
 * clamping -- presumably sub2ind (shared_utilities) handles the domain
 * boundary or callers exclude it; confirm.  Divisions by NormCrossAF and
 * fGradMag are unguarded, so voxels where grad(F) and grad(A) align (or
 * grad(F) vanishes) yield inf/NaN. */
__global__
void auxi_set_calculus_toolbox(double * Ax, double * Ay, double * Az, double * AGradMag, double * ACx, double * ACy, double * ACz, double * ANormCrossAF, double * Tx, double * Ty, double * Tz, double * Anx, double * Any, double * Anz, double * Axx, double * Ayy, double * Azz, double * Axy, double * Ayz, double * Azx, double * ALaplacian, double * GeodesicCurvature, double * NormalCurvature, double * GeodesicTorsion, double * BPerpendicular, double * AHeaviside, double * ADiracDelta, double const * lsf, double const * AHPrimal, double const * Fx, double const * Fy, double const * Fz, double const * FGradMag, double const * Nx, double const * Ny, double const * Nz, double const * Fxx, double const * Fyy, double const * Fzz, double const * Fxy, double const * Fyz, double const * Fzx, int rows, int cols, int pges, double dx, double dy, double dz, double ds, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
/* flat indices of this voxel and its 6 face neighbors (see boundary note) */
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
/* first derivatives of A: central differences over the face neighbors */
double ax = (lsf[right] - lsf[left]) / (2*dx);
double ay = (lsf[front] - lsf[back]) / (2*dy);
double az = (lsf[up] - lsf[down]) / (2*dz);
double aGradMag = norm(ax, ay, az);
Ax[ind] = ax;
Ay[ind] = ay;
Az[ind] = az;
AGradMag[ind] = aGradMag;
double fx = Fx[ind];
double fy = Fy[ind];
double fz = Fz[ind];
/* C = grad(F) x grad(A); |C| vanishes where the two gradients align */
double Cx, Cy, Cz;
cross_product(Cx,Cy,Cz,fx,fy,fz,ax,ay,az);
double NormCrossAF = norm(Cx,Cy,Cz);
ACx[ind] = Cx;
ACy[ind] = Cy;
ACz[ind] = Cz;
ANormCrossAF[ind] = NormCrossAF;
/* unit tangent of the intersection curve */
double tx = Cx / NormCrossAF;
double ty = Cy / NormCrossAF;
double tz = Cz / NormCrossAF;
Tx[ind] = tx;
Ty[ind] = ty;
Tz[ind] = tz;
double fNx = Nx[ind];
double fNy = Ny[ind];
double fNz = Nz[ind];
/* n = T x N: completes the (T, n, N) frame along the curve */
double nx, ny, nz;
cross_product(nx,ny,nz,tx,ty,tz,fNx,fNy,fNz);
Anx[ind] = nx;
Any[ind] = ny;
Anz[ind] = nz;
/* diagonal neighbors used by the 4-point mixed second-difference stencils */
int front_right = sub2ind(row_idx+1, col_idx+1, pge_idx, rows, cols, pges);
int back_left = sub2ind(row_idx-1, col_idx-1, pge_idx, rows, cols, pges);
int back_right = sub2ind(row_idx-1, col_idx+1, pge_idx, rows, cols, pges);
int front_left = sub2ind(row_idx+1, col_idx-1, pge_idx, rows, cols, pges);
int front_up = sub2ind(row_idx+1, col_idx, pge_idx+1, rows, cols, pges);
int back_down = sub2ind(row_idx-1, col_idx, pge_idx-1, rows, cols, pges);
int front_down = sub2ind(row_idx+1, col_idx, pge_idx-1, rows, cols, pges);
int back_up = sub2ind(row_idx-1, col_idx, pge_idx+1, rows, cols, pges);
int right_up = sub2ind(row_idx, col_idx+1, pge_idx+1, rows, cols, pges);
int left_down = sub2ind(row_idx, col_idx-1, pge_idx-1, rows, cols, pges);
int right_down = sub2ind(row_idx, col_idx+1, pge_idx-1, rows, cols, pges);
int left_up = sub2ind(row_idx, col_idx-1, pge_idx+1, rows, cols, pges);
/* second derivatives of A: 3-point axial and 4-point diagonal stencils
   (mixed terms divide by ds*ds, not the per-axis spacings -- presumably
   the grid is isotropic with dx == dy == dz == ds; confirm) */
double axx = (lsf[right] - 2*lsf[ind] + lsf[left]) / (dx*dx);
double ayy = (lsf[front] - 2*lsf[ind] + lsf[back]) / (dy*dy);
double azz = (lsf[up] - 2*lsf[ind] + lsf[down]) / (dz*dz);
double axy = (lsf[front_right]+lsf[back_left]-lsf[front_left]-lsf[back_right]) / (4*ds*ds);
double ayz = (lsf[front_up]+lsf[back_down]-lsf[front_down]-lsf[back_up]) / (4*ds*ds);
double azx = (lsf[right_up]+lsf[left_down]-lsf[right_down]-lsf[left_up]) / (4*ds*ds);
double aLaplacian = axx + ayy + azz;
Axx[ind] = axx;
Ayy[ind] = ayy;
Azz[ind] = azz;
Axy[ind] = axy;
Ayz[ind] = ayz;
Azx[ind] = azx;
ALaplacian[ind] = aLaplacian;
// geodesic curvature
double fxx = Fxx[ind];
double fyy = Fyy[ind];
double fzz = Fzz[ind];
double fxy = Fxy[ind];
double fyz = Fyz[ind];
double fzx = Fzx[ind];
double fGradMag = FGradMag[ind];
/* v = Hess(F) . t, then w1 = v x grad(A) */
double vx = tx*fxx + ty*fxy + tz*fzx;
double vy = tx*fxy + ty*fyy + tz*fyz;
double vz = tx*fzx + ty*fyz + tz*fzz;
double w1x, w1y, w1z;
cross_product(w1x,w1y,w1z,vx,vy,vz,ax,ay,az);
/* v = Hess(A) . t, then w2 = grad(F) x v */
vx = tx*axx + ty*axy + tz*azx;
vy = tx*axy + ty*ayy + tz*ayz;
vz = tx*azx + ty*ayz + tz*azz;
double w2x, w2y, w2z;
cross_product(w2x,w2y,w2z,fx,fy,fz,vx,vy,vz);
GeodesicCurvature[ind] = ( nx*(w1x+w2x) + ny*(w1y+w2y) + nz*(w1z+w2z) ) / NormCrossAF;
/* NormalCurvature, GeodesicTorsion, BPerpendicular */
/* Nij below is the Jacobian of F's unit normal grad(F)/|grad(F)|,
   expanded via the quotient rule */
double Nxx = fxx / fGradMag - fx*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
double Nyx = fxy / fGradMag - fy*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
double Nzx = fzx / fGradMag - fz*(fxx*fx + fxy*fy + fzx*fz) / pow(fGradMag,3) ;
double Nxy = fxy / fGradMag - fx*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
double Nyy = fyy / fGradMag - fy*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
double Nzy = fyz / fGradMag - fz*(fxy*fx + fyy*fy + fyz*fz) / pow(fGradMag,3) ;
double Nxz = fzx / fGradMag - fx*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
double Nyz = fyz / fGradMag - fy*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
double Nzz = fzz / fGradMag - fz*(fzx*fx + fyz*fy + fzz*fz) / pow(fGradMag,3) ;
// NormalCurvature.
/* kappa_n = - t . (DN t) */
vx = Nxx * tx + Nxy * ty + Nxz * tz;
vy = Nyx * tx + Nyy * ty + Nyz * tz;
vz = Nzx * tx + Nzy * ty + Nzz * tz;
NormalCurvature[ind] = - (tx*vx + ty*vy + tz*vz);
// GeodesicTorsion, BPerpendicular
/* tau_g = - t . (DN n);  b_perp = - n . (DN n) */
vx = Nxx * nx + Nxy * ny + Nxz * nz;
vy = Nyx * nx + Nyy * ny + Nyz * nz;
vz = Nzx * nx + Nzy * ny + Nzz * nz;
GeodesicTorsion[ind] = - (tx*vx + ty*vy + tz*vz);
BPerpendicular[ind] = - (nx*vx + ny*vy + nz*vz);
/*primal of Heaviside(A), Heaviside(A), DiracDelta(A)*/
// calculate Heaviside function
/* H(A) = grad(AHPrimal) . grad(A) / |grad(A)|^2 */
double px = (AHPrimal[right] - AHPrimal[left]) / (2*dx);
double py = (AHPrimal[front] - AHPrimal[back]) / (2*dy);
double pz = (AHPrimal[up] - AHPrimal[down]) / (2*dz);
double dot_DAHPrimal_DF = px*ax + py*ay + pz*az;
AHeaviside[ind] = dot_DAHPrimal_DF / pow(aGradMag,2);
// calculate DiraDelta function
/* delta(A) = Lap(P)/|grad A|^2 - (grad P . grad A) * Lap(A)/|grad A|^4 */
double pxx = (AHPrimal[right] - 2*AHPrimal[ind] +AHPrimal[left]) / (dx*dx);
double pyy = (AHPrimal[front] - 2*AHPrimal[ind] + AHPrimal[back]) / (dy*dy);
double pzz = (AHPrimal[up] - 2*AHPrimal[ind] + AHPrimal[down]) / (dz*dz);
double pLaplacian = pxx + pyy + pzz;
ADiracDelta[ind] = pLaplacian/pow(aGradMag,2) - dot_DAHPrimal_DF*aLaplacian/pow(aGradMag,4);
}
|
41a062365b2be3bbe62a913eac49fa50ccc2f61d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CLOCKS_PAR_SEC 1000000l
#define N 256
/************************************************************************/
/* Example */
/************************************************************************/
/* c[row] = dot(a[row, :], b) for an N x N row-major matrix `a`.
 * One thread per output row. */
__global__ void matVec(float *a, float *b, float *c)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    /* BUGFIX: guard against launches whose grid overshoots N rows; the
       original indexed a[] and c[] out of bounds in that case. For the
       exact N-thread launch used by main() this is a no-op. */
    if (row >= N)
        return;
    float acc = 0.0f;
    for (int i = 0; i < N; i++)
        acc += a[row * N + i] * b[i];
    c[row] = acc;
}
/************************************************************************/
/* Main */
/************************************************************************/
/* Benchmark harness: times an N x N mat-vec on the GPU (copies + kernel)
 * and a simple CPU loop, and verifies the GPU result against the known
 * closed-form answer. */
int main(int argc, char* argv[])
{
    float *host_a, *host_b, *host_c;
    float *dev_a, *dev_b, *dev_c;
    const int size = N * sizeof(float);
    host_a = (float*)malloc( size * N);
    host_b = (float*)malloc( size);
    host_c = (float*)malloc( size);
    hipMalloc( (void**)&dev_a, size * N);
    hipMalloc( (void**)&dev_b, size );
    hipMalloc( (void**)&dev_c, size );
    /* Known inputs: every matrix entry is 3 and every vector entry is 2,
       so each output element must equal 3*2*N == 6*N. */
    for (int i = 0; i < N*N; ++i)
    {
        host_a[i] = 3.0;
    }
    for (int i = 0; i < N; ++i )
    {
        host_b[i] = 2.0;
    }
    /* GPU timing: events bracket the H2D copies, the kernel and the D2H copy. */
    hipEvent_t start, stop;
    float time;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipMemcpy(dev_a, host_a, size * N, hipMemcpyHostToDevice);
    hipMemcpy(dev_b, host_b, size, hipMemcpyHostToDevice);
    /* One thread per output row: N threads in blocks of 128. */
    dim3 ThreadPerBlock ( 128 , 1 );
    dim3 BlockPerGrid ( N/128 , 1 );
    hipLaunchKernelGGL(( matVec), dim3(BlockPerGrid), dim3(ThreadPerBlock), 0, 0, dev_a, dev_b, dev_c);
    hipMemcpy( host_c, dev_c, size, hipMemcpyDeviceToHost );
    hipEventRecord(stop, 0);
    hipEventSynchronize( stop );
    hipEventElapsedTime(&time, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    /* BUGFIX: dev_a and dev_b were previously leaked; release all three. */
    hipFree( dev_a );
    hipFree( dev_b );
    hipFree( dev_c );
    /* Verify: print any element that differs from the expected 6*N. */
    for (int i=0; i<N; i++)
    {
        if (host_c[i] != 6*N)
        {
            printf("c[%3d] = %5.1f \n", i, host_c[i] );
        }
    }
    printf("temps coule sur GPU : %f ms \n", time);
    /* CPU timing loop.
       NOTE(review): this times element-wise vector addition, not the
       matrix-vector product, so it is not a like-for-like comparison with
       the GPU measurement above (kept as-is to preserve behavior). */
    int j=0;
    clock_t t1, t2;
    double tempsCPU;
    t1 = clock();
    for (j=0; j<1000; j++)
    {
        for (int i=0; i<N; i++)
            host_c[i] = host_a[i] + host_b[i];
    }
    t2 = clock();
    tempsCPU = (double)difftime(t2, t1)/(double)CLOCKS_PAR_SEC;
    printf("temps coule sur CPU: %f ms \n", tempsCPU * 1000.0 / j);
    free(host_a);
    free(host_b);
    free(host_c);
    return EXIT_SUCCESS;
}
| 41a062365b2be3bbe62a913eac49fa50ccc2f61d.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CLOCKS_PAR_SEC 1000000l
#define N 256
/************************************************************************/
/* Example */
/************************************************************************/
/* c[row] = dot(a[row, :], b) for an N x N row-major matrix `a`.
 * One thread per output row. */
__global__ void matVec(float *a, float *b, float *c)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    /* BUGFIX: guard against launches whose grid overshoots N rows; the
       original indexed a[] and c[] out of bounds in that case. For the
       exact N-thread launch used by main() this is a no-op. */
    if (row >= N)
        return;
    float acc = 0.0f;
    for (int i = 0; i < N; i++)
        acc += a[row * N + i] * b[i];
    c[row] = acc;
}
/************************************************************************/
/* Main */
/************************************************************************/
/* Benchmark harness: times an N x N mat-vec on the GPU (copies + kernel)
 * and a simple CPU loop, and verifies the GPU result against the known
 * closed-form answer. */
int main(int argc, char* argv[])
{
    float *host_a, *host_b, *host_c;
    float *dev_a, *dev_b, *dev_c;
    const int size = N * sizeof(float);
    host_a = (float*)malloc( size * N);
    host_b = (float*)malloc( size);
    host_c = (float*)malloc( size);
    cudaMalloc( (void**)&dev_a, size * N);
    cudaMalloc( (void**)&dev_b, size );
    cudaMalloc( (void**)&dev_c, size );
    /* Known inputs: every matrix entry is 3 and every vector entry is 2,
       so each output element must equal 3*2*N == 6*N. */
    for (int i = 0; i < N*N; ++i)
    {
        host_a[i] = 3.0;
    }
    for (int i = 0; i < N; ++i )
    {
        host_b[i] = 2.0;
    }
    /* GPU timing: events bracket the H2D copies, the kernel and the D2H copy. */
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMemcpy(dev_a, host_a, size * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, size, cudaMemcpyHostToDevice);
    /* One thread per output row: N threads in blocks of 128. */
    dim3 ThreadPerBlock ( 128 , 1 );
    dim3 BlockPerGrid ( N/128 , 1 );
    matVec<<<BlockPerGrid, ThreadPerBlock>>>(dev_a, dev_b, dev_c);
    cudaMemcpy( host_c, dev_c, size, cudaMemcpyDeviceToHost );
    cudaEventRecord(stop, 0);
    cudaEventSynchronize( stop );
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* BUGFIX: dev_a and dev_b were previously leaked; release all three. */
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    /* Verify: print any element that differs from the expected 6*N. */
    for (int i=0; i<N; i++)
    {
        if (host_c[i] != 6*N)
        {
            printf("c[%3d] = %5.1f \n", i, host_c[i] );
        }
    }
    printf("temps écoule sur GPU : %f ms \n", time);
    /* CPU timing loop.
       NOTE(review): this times element-wise vector addition, not the
       matrix-vector product, so it is not a like-for-like comparison with
       the GPU measurement above (kept as-is to preserve behavior). */
    int j=0;
    clock_t t1, t2;
    double tempsCPU;
    t1 = clock();
    for (j=0; j<1000; j++)
    {
        for (int i=0; i<N; i++)
            host_c[i] = host_a[i] + host_b[i];
    }
    t2 = clock();
    tempsCPU = (double)difftime(t2, t1)/(double)CLOCKS_PAR_SEC;
    printf("temps écoule sur CPU: %f ms \n", tempsCPU * 1000.0 / j);
    free(host_a);
    free(host_b);
    free(host_c);
    return EXIT_SUCCESS;
}
|
c5361964af2c2d34f4bc4ad0da460f54fd3545b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* In-place per-element update of poli[0..N): replaces x with
 * p(x) + 1/x, where p(x) = 5 + 7x - 9x^2 - 5x^3 - 5x^4 - x^5
 * (evaluated in Horner form).  Result is inf for x == 0, as before. */
__global__ void poly_div5(float* poli, const int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        float x = poli[idx];
        /* BUGFIX: use a float literal so the division stays in single
           precision instead of silently promoting the whole expression
           to double on the device. */
        poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+1.0f/x;
    }
} | c5361964af2c2d34f4bc4ad0da460f54fd3545b0.cu | #include "includes.h"
/* In-place per-element update of poli[0..N): replaces x with
 * p(x) + 1/x, where p(x) = 5 + 7x - 9x^2 - 5x^3 - 5x^4 - x^5
 * (evaluated in Horner form).  Result is inf for x == 0, as before. */
__global__ void poly_div5(float* poli, const int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        float x = poli[idx];
        /* BUGFIX: use a float literal so the division stays in single
           precision instead of silently promoting the whole expression
           to double on the device. */
        poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+1.0f/x;
    }
} |
b301289f089152e7728688e928717c52e3502c1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018 Zhao Zhixu
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <string.h>
#include <assert.h>
#include <stdarg.h>
#include <math.h>
#include "tl_tensor.h"
#include "tl_util.h"
#define BLOCK_SIZE 512
#define BLOCK_NUM(bs, tn) (((tn) + (bs) - 1) / (bs))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
static inline __device__ int get_index(const int *ids, int ndim, const int *dims)
{
    /* Fold per-dimension coordinates into a flat row-major offset. */
    int id = ids[0];
    for (int i = 1; i < ndim; i++)
        id = id * dims[i] + ids[i];
    return id;
}
static inline __device__ void get_coords(int id, int *ids, int ndim, const int *dims)
{
    /* Unflatten a row-major offset into per-dimension coordinates,
       peeling off the fastest-varying (last) dimension first. */
    int i = ndim;
    while (--i >= 0) {
        ids[i] = id % dims[i];
        id /= dims[i];
    }
}
/* Saturating scalar conversion on the device: reads one element of type
 * dtype_s from *ps and stores it as dtype_d into *pd.  Values outside the
 * destination's representable range are clamped to the destination's
 * min/max (negative sources clamp to 0 for unsigned targets); TL_BOOL
 * targets receive TL_TRUE for any non-zero source.  Unsupported dtype
 * values trip a device-side assert. */
static __device__ void convert_device(void *pd, tl_dtype dtype_d,
const void *ps, tl_dtype dtype_s)
{
tl_check_dtype(dtype_d);
tl_check_dtype(dtype_s);
double val_d;
float val_f;
int32_t val_i32;
uint32_t val_u32;
int16_t val_i16;
uint16_t val_u16;
int8_t val_i8;
uint8_t val_u8;
/* outer switch: destination type; inner switch: source type */
switch (dtype_d) {
case TL_DOUBLE:
switch (dtype_s) {
case TL_DOUBLE:
*(double *)pd = *(double *)ps;
break;
case TL_FLOAT:
*(double *)pd = (double)*(float *)ps;
break;
case TL_INT32:
*(double *)pd = (double)*(int32_t *)ps;
break;
case TL_INT16:
*(double *)pd = (double)*(int16_t *)ps;
break;
case TL_INT8:
*(double *)pd = (double)*(int8_t *)ps;
break;
case TL_UINT32:
*(double *)pd = (double)*(uint32_t *)ps;
break;
case TL_UINT16:
*(double *)pd = (double)*(uint16_t *)ps;
break;
case TL_UINT8:
*(double *)pd = (double)*(uint8_t *)ps;
break;
case TL_BOOL:
*(double *)pd = (double)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_FLOAT:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= FLT_MAX)
*(float *)pd = FLT_MAX;
else if (val_d <= -FLT_MAX)
*(float *)pd = -FLT_MAX;
else
*(float *)pd = (float)val_d;
break;
case TL_FLOAT:
*(float *)pd = *(float *)ps;
break;
case TL_INT32:
*(float *)pd = (float)*(int32_t *)ps;
break;
case TL_INT16:
*(float *)pd = (float)*(int16_t *)ps;
break;
case TL_INT8:
*(float *)pd = (float)*(int8_t *)ps;
break;
case TL_UINT32:
*(float *)pd = (float)*(uint32_t *)ps;
break;
case TL_UINT16:
*(float *)pd = (float)*(uint16_t *)ps;
break;
case TL_UINT8:
*(float *)pd = (float)*(uint8_t *)ps;
break;
case TL_BOOL:
*(float *)pd = (float)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= INT32_MAX)
*(int32_t *)pd = INT32_MAX;
else if (val_d <= INT32_MIN)
*(int32_t *)pd = INT32_MIN;
else
*(int32_t *)pd = (int32_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= INT32_MAX)
*(int32_t *)pd = INT32_MAX;
else if (val_f <= INT32_MIN)
*(int32_t *)pd = INT32_MIN;
else
*(int32_t *)pd = (int32_t)val_f;
break;
case TL_INT32:
*(int32_t *)pd = *(int32_t *)ps;
break;
case TL_INT16:
*(int32_t *)pd = (int32_t)*(int16_t *)ps;
break;
case TL_INT8:
*(int32_t *)pd = (int32_t)*(int8_t *)ps;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= INT32_MAX)
*(int32_t *)pd = INT32_MAX;
else
*(int32_t *)pd = (int32_t)val_u32;
break;
case TL_UINT16:
/* printf("*ps = %d\n", *(uint16_t *)ps); */
*(int32_t *)pd = (int32_t)*(uint16_t *)ps;
/* printf("*pd = %d\n", *(int32_t *)pd); */
break;
case TL_UINT8:
*(int32_t *)pd = (int32_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(int32_t *)pd = (int32_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else if (val_d <= INT16_MIN)
*(int16_t *)pd = INT16_MIN;
else
*(int16_t *)pd = (int16_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else if (val_f <= INT16_MIN)
*(int16_t *)pd = INT16_MIN;
else
*(int16_t *)pd = (int16_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else if (val_i32 <= INT16_MIN)
*(int16_t *)pd = INT16_MIN;
else
*(int16_t *)pd = (int16_t)val_i32;
break;
case TL_INT16:
*(int16_t *)pd = *(int16_t *)ps;
break;
case TL_INT8:
*(int16_t *)pd = (int16_t)*(int8_t *)ps;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else
*(int16_t *)pd = (int16_t)val_u32;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16 >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else
*(int16_t *)pd = (int16_t)val_u16;
break;
case TL_UINT8:
*(int16_t *)pd = (int16_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(int16_t *)pd = (int16_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_d <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_f <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_i32 <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_i32;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_i16 <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_i16;
break;
case TL_INT8:
*(int8_t *)pd = *(int8_t *)ps;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else
*(int8_t *)pd = (int8_t)val_u32;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else
*(int8_t *)pd = (int8_t)val_u16;
break;
case TL_UINT8:
val_u8 = *(uint8_t *)ps;
if (val_u8 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else
*(int8_t *)pd = (int8_t)val_u8;
break;
case TL_BOOL:
*(int8_t *)pd = (int8_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= UINT32_MAX)
*(uint32_t *)pd = UINT32_MAX;
else if (val_d < 0)
*(uint32_t *)pd = 0;
else
*(uint32_t *)pd = (uint32_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= UINT32_MAX)
*(uint32_t *)pd = UINT32_MAX;
else if (val_f < 0)
*(uint32_t *)pd = 0;
else
*(uint32_t *)pd = (uint32_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= 0)
*(uint32_t *)pd = (uint32_t)val_i32;
else
*(uint32_t *)pd = 0;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= 0)
*(uint32_t *)pd = (uint32_t)val_i16;
else
*(uint32_t *)pd = 0;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8 >= 0)
*(uint32_t *)pd = (uint32_t)val_i8;
else
*(uint32_t *)pd = 0;
break;
case TL_UINT32:
*(uint32_t *)pd = *(uint32_t *)ps;
break;
case TL_UINT16:
*(uint32_t *)pd = (uint32_t)*(uint16_t *)ps;
break;
case TL_UINT8:
*(uint32_t *)pd = (uint32_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(uint32_t *)pd = (uint32_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else if (val_d < 0)
*(uint16_t *)pd = 0;
else
*(uint16_t *)pd = (uint16_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else if (val_f < 0)
*(uint16_t *)pd = 0;
else
*(uint16_t *)pd = (uint16_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else if (val_i32 < 0)
*(uint16_t *)pd = 0;
else
*(uint16_t *)pd = (uint16_t)val_i32;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= 0)
*(uint16_t *)pd = (uint16_t)val_i16;
else
*(uint16_t *)pd = 0;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8 >= 0)
*(uint16_t *)pd = (uint16_t)val_i8;
else
*(uint16_t *)pd = 0;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else
*(uint16_t *)pd = (uint16_t)val_u32;
break;
case TL_UINT16:
*(uint16_t *)pd = *(uint16_t *)ps;
break;
case TL_UINT8:
*(uint16_t *)pd = (uint16_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(uint16_t *)pd = (uint16_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_d < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_f < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_i32 < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_i32;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_i16 < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_i16;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8 >= 0)
*(uint8_t *)pd = (uint8_t)val_i8;
else
*(uint8_t *)pd = 0;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else
*(uint8_t *)pd = (uint8_t)val_u32;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else
*(uint8_t *)pd = (uint8_t)val_u16;
break;
case TL_UINT8:
*(uint8_t *)pd = *(uint8_t *)ps;
break;
case TL_BOOL:
*(uint8_t *)pd = (uint8_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_BOOL:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d > 0 || val_d < 0)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f > 0 || val_f < 0)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_UINT8:
val_u8 = *(uint8_t *)ps;
if (val_u8)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_BOOL:
*(tl_bool_t *)pd = *(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
}
/* Free a device tensor together with its CUDA data buffer.
 * A NULL tensor is silently ignored. */
void tl_tensor_free_data_too_cuda(tl_tensor *t)
{
    if (t) {
        tl_free_cuda(t->data);
        tl_tensor_free(t);
    }
}
/* Create an owning tensor whose data buffer lives in CUDA device memory,
 * zero-initialized. */
tl_tensor *tl_tensor_zeros_cuda(int ndim, const int *dims, tl_dtype dtype)
{
    tl_tensor *t = tl_tensor_create(NULL, ndim, dims, dtype);
    size_t nbytes = t->len * tl_size_of(dtype);

    t->owner = t;
    t->data = tl_alloc_cuda(nbytes);
    tl_memset_cuda(t->data, 0, nbytes);
    return t;
}
/* Deep-copy a host tensor into a new owning device tensor. */
tl_tensor *tl_tensor_clone_h2d(const tl_tensor *src)
{
    assert(src);
    void *dev_data = tl_clone_h2d(src->data, src->len * tl_size_of(src->dtype));
    tl_tensor *dst = tl_tensor_create(dev_data, src->ndim, src->dims,
                                      src->dtype);
    dst->owner = dst;
    return dst;
}
/* Deep-copy a device tensor into a new owning host tensor. */
tl_tensor *tl_tensor_clone_d2h(const tl_tensor *src)
{
    assert(src);
    void *host_data = tl_clone_d2h(src->data, src->len * tl_size_of(src->dtype));
    tl_tensor *dst = tl_tensor_create(host_data, src->ndim, src->dims,
                                      src->dtype);
    dst->owner = dst;
    return dst;
}
/* Deep-copy a device tensor into a new owning device tensor. */
tl_tensor *tl_tensor_clone_d2d(const tl_tensor *src)
{
    assert(src);
    void *dev_data = tl_clone_d2d(src->data, src->len * tl_size_of(src->dtype));
    tl_tensor *dst = tl_tensor_create(dev_data, src->ndim, src->dims,
                                      src->dtype);
    dst->owner = dst;
    return dst;
}
/* Tile a host tensor `times` times into device memory. The result gains a
 * new leading dimension of length `times`: shape [times, src->dims...]. */
tl_tensor *tl_tensor_repeat_h2d(const tl_tensor *src, int times)
{
    assert(src);
    void *dev_data = tl_repeat_h2d(src->data,
                                   src->len * tl_size_of(src->dtype), times);
    int *new_dims = (int *)tl_alloc(sizeof(int) * (src->ndim + 1));
    new_dims[0] = times;
    memmove(new_dims + 1, src->dims, sizeof(int) * src->ndim);
    tl_tensor *dst = tl_tensor_create(dev_data, src->ndim + 1, new_dims,
                                      src->dtype);
    dst->owner = dst;
    tl_free(new_dims);
    return dst;
}
/* Tile a device tensor `times` times into host memory. The result gains a
 * new leading dimension of length `times`: shape [times, src->dims...]. */
tl_tensor *tl_tensor_repeat_d2h(const tl_tensor *src, int times)
{
    assert(src);
    void *host_data = tl_repeat_d2h(src->data,
                                    src->len * tl_size_of(src->dtype), times);
    int *new_dims = (int *)tl_alloc(sizeof(int) * (src->ndim + 1));
    new_dims[0] = times;
    memmove(new_dims + 1, src->dims, sizeof(int) * src->ndim);
    tl_tensor *dst = tl_tensor_create(host_data, src->ndim + 1, new_dims,
                                      src->dtype);
    dst->owner = dst;
    tl_free(new_dims);
    return dst;
}
/* Tile a device tensor `times` times into device memory. The result gains a
 * new leading dimension of length `times`: shape [times, src->dims...]. */
tl_tensor *tl_tensor_repeat_d2d(const tl_tensor *src, int times)
{
    assert(src);
    void *dev_data = tl_repeat_d2d(src->data,
                                   src->len * tl_size_of(src->dtype), times);
    int *new_dims = (int *)tl_alloc(sizeof(int) * (src->ndim + 1));
    new_dims[0] = times;
    memmove(new_dims + 1, src->dims, sizeof(int) * src->ndim);
    tl_tensor *dst = tl_tensor_create(dev_data, src->ndim + 1, new_dims,
                                      src->dtype);
    dst->owner = dst;
    tl_free(new_dims);
    return dst;
}
/* arange at host, copy to device.
 * Build the sequence start, start+step, ... (< stop) in a host tensor, then
 * copy the whole buffer to a new owning device tensor.
 * Returns NULL when the element count would exceed INT32_MAX.
 *
 * Fixes vs previous revision:
 *  - the host staging buffer was a tl_tensor* stored in a void*, so the
 *    elements were written into the struct, not its data buffer;
 *  - only tl_size_of(dtype) bytes (one element) were copied to the device
 *    instead of the whole buffer;
 *  - the host staging tensor was leaked. */
tl_tensor *tl_tensor_arange_cuda(double start, double stop, double step,
                                 tl_dtype dtype)
{
    int dims[1];
    tl_tensor *tmp;   /* host staging tensor */
    tl_tensor *dst;
    double len, elem;
    size_t dsize;

    dsize = tl_size_of(dtype);
    assert(start >= tl_dtype_min(dtype) && start <= tl_dtype_max(dtype));
    assert(stop >= tl_dtype_min(dtype) && stop <= tl_dtype_max(dtype));
    assert(step >= tl_dtype_min(dtype) && step <= tl_dtype_max(dtype));
    assert(step != 0);
    assert(stop > start);        /* TODO: expand to all possibilities */

    len = ceil((stop - start) / step);
    if (len > INT32_MAX)
        return NULL;
    dims[0] = (int)len;
    dst = tl_tensor_zeros_cuda(1, dims, dtype);
    tmp = tl_tensor_zeros(1, dims, dtype);
    for (int i = 0; i < dims[0]; i++) {
        elem = start + step * i;
        /* convert the double value into the i-th slot of the host buffer */
        tl_convert(tl_padd(tmp->data, i, dsize), dtype, &elem, TL_DOUBLE);
    }
    /* copy the WHOLE buffer, not just one element */
    tl_memcpy_h2d(dst->data, tmp->data, dst->len * tl_size_of(dst->dtype));
    tl_tensor_free_data_too(tmp);
    return dst;
}
/* Print a device tensor to `stream` by staging it through a host clone. */
void tl_tensor_fprint_cuda(FILE *stream, const tl_tensor *t, const char *fmt)
{
    tl_tensor *host_copy = tl_tensor_clone_d2h(t);
    tl_tensor_fprint(stream, host_copy, fmt);
    tl_tensor_free_data_too(host_copy);
}
/* Convenience wrapper: print device tensor t to stdout with format fmt. */
void tl_tensor_print_cuda(const tl_tensor *t, const char *fmt)
{
    tl_tensor_fprint_cuda(stdout, t, fmt);
}
/* Save a device tensor to `file_name`, staging through a host clone.
 * Returns the status code of tl_tensor_save. */
int tl_tensor_save_cuda(const char *file_name, const tl_tensor *t,
                        const char *fmt)
{
    tl_tensor *host_copy = tl_tensor_clone_d2h(t);
    int ret = tl_tensor_save(file_name, host_copy, fmt);
    tl_tensor_free_data_too(host_copy);
    return ret;
}
/* Allocate a zeroed device tensor shaped like `src`, except the sliced
 * dimension is shortened: dims[axis] = len. */
tl_tensor *tl_tensor_zeros_slice_cuda(const tl_tensor *src, int axis, int len,
                                      tl_dtype dtype)
{
    assert(src);
    assert(axis >= 0 && axis < src->ndim);
    assert(len > 0 && len <= src->dims[axis]);
    int *slice_dims = (int *)tl_clone(src->dims, sizeof(int) * src->ndim);
    slice_dims[axis] = len;
    tl_tensor *dst = tl_tensor_zeros_cuda(src->ndim, slice_dims, dtype);
    tl_free(slice_dims);
    return dst;
}
/* Copy one slice element per thread. `di` enumerates dst linearly; the
 * matching src offset uses `vol` (volume below the sliced axis), `d_vol` /
 * `s_vol` (per-step volumes of dst / src at the axis) and `start` (slice
 * offset along the axis). Threads past `total` do nothing. */
template <typename T>
static __global__ void slice_kernel(T *src, T *dst, int start, int s_vol,
                                    int d_vol, int vol, int block_size, int total)
{
    int di = threadIdx.x + blockIdx.x * block_size;
    if (di < total) {
        int si = di / d_vol * s_vol + di % d_vol + start * vol;
        dst[di] = src[si];
    }
}
/* Slice a device tensor along `axis`, keeping `len` entries beginning at
 * `start`. If `dst` is NULL a zeroed slice tensor is allocated; otherwise
 * dst must match src in dtype/ndim with dims[axis] == len. Dispatches a
 * typed slice_kernel per dtype and synchronizes before returning dst. */
tl_tensor *tl_tensor_slice_cuda(const tl_tensor *src, tl_tensor *dst, int axis,
                                int start, int len)
{
    int i;
    int d_vol, s_vol, vol;
    int thread_num, block_num;
    assert(src && tl_is_device_mem(src->data));
    assert(axis < src->ndim && axis >= 0);
    assert(len <= src->dims[axis] && len > 0);
    assert(start < src->dims[axis] && start >= 0);
    assert(len + start <= src->dims[axis]);
    if (dst) {
#ifndef NDEBUG
        assert(tl_is_device_mem(dst->data));
        assert(src->dtype == dst->dtype);
        assert(dst->ndim == src->ndim);
        for (i = 0; i < src->ndim; i++)
            assert(i == axis ? dst->dims[i] == len :
                   dst->dims[i] == src->dims[i]);
#endif
    } else {
        dst = tl_tensor_zeros_slice_cuda(src, axis, len, src->dtype);
    }
    /* vol = volume below the sliced axis; d_vol/s_vol = volumes including
       the axis for dst and src respectively */
    for (i = axis+1, vol = 1; i < dst->ndim; i++)
        vol *= dst->dims[i];
    d_vol = vol * dst->dims[axis];
    s_vol = vol * src->dims[axis];
    thread_num = dst->len;  /* one thread per destination element */
    block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
    switch (src->dtype) {
    case TL_DOUBLE:
        hipLaunchKernelGGL(( slice_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src->data,
                           (double *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_FLOAT:
        hipLaunchKernelGGL(( slice_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src->data,
                           (float *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_INT32:
        hipLaunchKernelGGL(( slice_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src->data,
                           (int32_t *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_INT16:
        hipLaunchKernelGGL(( slice_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src->data,
                           (int16_t *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_INT8:
        hipLaunchKernelGGL(( slice_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src->data,
                           (int8_t *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_UINT32:
        hipLaunchKernelGGL(( slice_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src->data,
                           (uint32_t *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_UINT16:
        hipLaunchKernelGGL(( slice_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src->data,
                           (uint16_t *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_UINT8:
        hipLaunchKernelGGL(( slice_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src->data,
                           (uint8_t *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    case TL_BOOL:
        hipLaunchKernelGGL(( slice_kernel<tl_bool_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src->data,
                           (tl_bool_t *)dst->data,
                           start, s_vol, d_vol, vol,
                           BLOCK_SIZE, thread_num);
        break;
    default:
        assert(0 && "unsupported tl_dtype");
        break;
    }
    tl_cuda_device_sync();
    return dst;
}
/* Max (and optional argmax) reduction along one axis, one output element per
 * thread. Each thread scans dim_size elements spaced reduce_vol apart; the
 * maximum goes to dst[di], its axis index to arg[di] when arg != NULL. */
template <typename T>
static __global__ void maxreduce_kernel(T *src, T *dst, int32_t *arg, int dim_size,
                                        int reduce_vol, int batch_vol,
                                        int block_size, int total)
{
    int di = threadIdx.x + blockIdx.x * block_size;
    if (di >= total)
        return;
    /* first element compared by this thread:
       si = batch_vol * (di / reduce_vol) + (di - reduce_vol * (di / reduce_vol)),
       rewritten without the extra multiply: */
    int si = (batch_vol - reduce_vol) * (di / reduce_vol) + di;
    T best = src[si];
    int best_i = 0;
    for (int k = 1; k < dim_size; k++) {
        T cand = src[si + k * reduce_vol];
        if (cand > best) {
            best = cand;
            best_i = k;
        }
    }
    dst[di] = best;
    if (arg)
        arg[di] = best_i;
}
/* Max-reduce a device tensor along `axis`. If `dst` is NULL a zeroed slice
 * tensor (dims[axis] == 1) is allocated; if `arg` is non-NULL the int32
 * argmax indices are written there. Returns dst.
 * E.g. for shape [N, C, H, W] and axis = 1: thread_num = N x H x W,
 * reduce_vol = H x W, index_vol = C x H x W.
 * Fix vs previous revision: tl_check_dtype(src->dtype) was called before
 * the assert that src is non-NULL, dereferencing src unchecked. */
tl_tensor *tl_tensor_maxreduce_cuda(const tl_tensor *src, tl_tensor *dst,
                                    tl_tensor *arg, int axis)
{
    int thread_num, block_num, reduce_vol, index_vol;
    void *arg_data;
    int i;
    /* validate src before dereferencing it */
    assert(src && tl_is_device_mem(src->data));
    tl_check_dtype(src->dtype);
    assert(axis < src->ndim && axis >= 0);
    if (dst) {
#ifndef NDEBUG
        assert(tl_is_device_mem(dst->data));
        assert(src->dtype == dst->dtype);
        for (i = 0; i < dst->ndim; i++)
            assert(i == axis ? dst->dims[i] == 1 :
                   dst->dims[i] == src->dims[i]);
#endif
    } else {
        dst = tl_tensor_zeros_slice_cuda(src, axis, 1, src->dtype);
    }
    if (arg) {
#ifndef NDEBUG
        assert(tl_is_device_mem(arg->data));
        assert(arg->dtype == TL_INT32);
        for (i = 0; i < arg->ndim; i++)
            assert(i == axis ? arg->dims[i] == 1 :
                   arg->dims[i] == src->dims[i]);
#endif
        arg_data = arg->data;
    } else {
        arg_data = NULL;
    }
    /* reduce_vol = volume below the reduced axis; index_vol spans the axis */
    for (i = axis+1, thread_num = 1; i < dst->ndim; i++)
        thread_num *= dst->dims[i];
    reduce_vol = thread_num;
    index_vol = thread_num * src->dims[axis];
    for (i = 0; i < axis; i++)
        thread_num *= dst->dims[i];
    block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
    switch (src->dtype) {
    case TL_DOUBLE:
        hipLaunchKernelGGL(( maxreduce_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src->data,
                           (double *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_FLOAT:
        hipLaunchKernelGGL(( maxreduce_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src->data,
                           (float *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_INT32:
        hipLaunchKernelGGL(( maxreduce_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src->data,
                           (int32_t *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_INT16:
        hipLaunchKernelGGL(( maxreduce_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src->data,
                           (int16_t *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_INT8:
        hipLaunchKernelGGL(( maxreduce_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src->data,
                           (int8_t *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_UINT32:
        hipLaunchKernelGGL(( maxreduce_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src->data,
                           (uint32_t *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_UINT16:
        hipLaunchKernelGGL(( maxreduce_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src->data,
                           (uint16_t *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_UINT8:
        hipLaunchKernelGGL(( maxreduce_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src->data,
                           (uint8_t *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    case TL_BOOL:
        hipLaunchKernelGGL(( maxreduce_kernel<tl_bool_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src->data,
                           (tl_bool_t *)dst->data,
                           (int32_t *)arg_data,
                           src->dims[axis],
                           reduce_vol,
                           index_vol,
                           BLOCK_SIZE,
                           thread_num);
        break;
    default:
        assert(0 && "unsupported tl_dtype");
        break;
    }
    tl_cuda_device_sync();
    return dst;
}
/* Element-wise product: dst[i] = src1[i] * src2[i], one element per thread. */
template <typename T>
static __global__ void mul_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = src1[i] * src2[i];
}
/* Boolean product: dst[i] = TL_TRUE iff src1[i] * src2[i] is nonzero. */
static __global__ void mul_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = (src1[i] * src2[i]) ? TL_TRUE : TL_FALSE;
}
/* Element-wise quotient: dst[i] = src1[i] / src2[i].
 * Division by zero is not checked here (caller's responsibility). */
template <typename T>
static __global__ void div_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = src1[i] / src2[i];
}
/* Boolean quotient: dst[i] = TL_TRUE iff src1[i] / src2[i] is nonzero.
 * Division by zero is not checked here (caller's responsibility). */
static __global__ void div_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = (src1[i] / src2[i]) ? TL_TRUE : TL_FALSE;
}
/* Element-wise sum: dst[i] = src1[i] + src2[i], one element per thread. */
template <typename T>
static __global__ void sum_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = src1[i] + src2[i];
}
/* Boolean sum: dst[i] = TL_TRUE iff src1[i] + src2[i] is nonzero. */
static __global__ void sum_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = (src1[i] + src2[i]) ? TL_TRUE : TL_FALSE;
}
/* Element-wise difference: dst[i] = src1[i] - src2[i], one element per thread. */
template <typename T>
static __global__ void sub_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = src1[i] - src2[i];
}
/* Boolean difference: dst[i] = TL_TRUE iff src1[i] - src2[i] is nonzero. */
static __global__ void sub_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = (src1[i] - src2[i]) ? TL_TRUE : TL_FALSE;
}
/* Element-wise maximum: dst[i] = max(src1[i], src2[i]). */
template <typename T>
static __global__ void max_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = max(src1[i], src2[i]);
}
/* Element-wise minimum: dst[i] = min(src1[i], src2[i]). */
template <typename T>
static __global__ void min_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = min(src1[i], src2[i]);
}
/* Integer power computed via single-precision powf, saturating the result
 * to [type_min, type_max] before the cast back to T.
 * NOTE(review): float has only 24 mantissa bits, so results may be inexact
 * for wide integer types — behavior kept as-is. */
template <typename T>
static __global__ void pow_int_kernel(T *src1, T *src2, T *dst, T type_max, T type_min,
                                      int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i >= total)
        return;
    float result = powf((float)src1[i], (float)src2[i]);
    if (result >= type_max)
        dst[i] = type_max;
    else if (result <= type_min)
        dst[i] = type_min;
    else
        dst[i] = (T)result;
}
/* Element-wise double-precision power: dst[i] = pow(src1[i], src2[i]). */
static __global__ void pow_double_kernel(double *src1, double *src2, double *dst,
                                         int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = pow(src1[i], src2[i]);
}
/* Element-wise single-precision power: dst[i] = powf(src1[i], src2[i]). */
static __global__ void pow_float_kernel(float *src1, float *src2, float *dst,
                                        int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i < total)
        dst[i] = powf(src1[i], src2[i]);
}
/* Boolean power: dst[i] = TL_TRUE iff powf(src1[i], src2[i]) is nonzero. */
static __global__ void pow_bool_kernel(tl_bool_t *src1, tl_bool_t *src2, tl_bool_t *dst,
                                       int block_size, int total)
{
    int i = threadIdx.x + blockIdx.x * block_size;
    if (i >= total)
        return;
    float result = powf((float)src1[i], (float)src2[i]);
    dst[i] = (result > 0 || result < 0) ? TL_TRUE : TL_FALSE;
}
/* Element-wise binary operation on two same-shaped, same-dtype device
 * tensors. If dst is NULL a zeroed device tensor of the common shape is
 * allocated. Dispatches the per-dtype kernel for elew_op (MUL, DIV, SUM,
 * SUB, MAX, MIN, POW); boolean tensors use the *_bool_kernel variants and
 * integer POW saturates to the dtype's range. Synchronizes before return.
 * NOTE(review): the assert messages below spell "unsopported"; left as-is
 * because they are runtime strings. */
tl_tensor *tl_tensor_elew_cuda(const tl_tensor *src1, const tl_tensor *src2,
                               tl_tensor *dst, tl_elew_op elew_op)
{
    assert(tl_tensor_issameshape(src1, src2));
    assert(tl_is_device_mem(src1->data) && tl_is_device_mem(src2->data));
    assert(src1->dtype == src2->dtype);
    if (dst) {
        assert(tl_is_device_mem(dst->data));
        assert(tl_tensor_issameshape(src1, dst));
        assert(src1->dtype == dst->dtype);
    } else {
        /* src1 and src2 are asserted same-shape above, so src2->dims works */
        dst = tl_tensor_zeros_cuda(src1->ndim, src2->dims, src1->dtype);
    }
    int thread_num = dst->len;  /* one thread per element */
    int block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
    switch (src1->dtype) {
    case TL_DOUBLE:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src1->data,
                               (double *)src2->data,
                               (double *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src1->data,
                               (double *)src2->data,
                               (double *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src1->data,
                               (double *)src2->data,
                               (double *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src1->data,
                               (double *)src2->data,
                               (double *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src1->data,
                               (double *)src2->data,
                               (double *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src1->data,
                               (double *)src2->data,
                               (double *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            hipLaunchKernelGGL(( pow_double_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src1->data,
                               (double *)src2->data,
                               (double *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_FLOAT:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src1->data,
                               (float *)src2->data,
                               (float *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src1->data,
                               (float *)src2->data,
                               (float *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src1->data,
                               (float *)src2->data,
                               (float *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src1->data,
                               (float *)src2->data,
                               (float *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src1->data,
                               (float *)src2->data,
                               (float *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src1->data,
                               (float *)src2->data,
                               (float *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            hipLaunchKernelGGL(( pow_float_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src1->data,
                               (float *)src2->data,
                               (float *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_INT32:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src1->data,
                               (int32_t *)src2->data,
                               (int32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src1->data,
                               (int32_t *)src2->data,
                               (int32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src1->data,
                               (int32_t *)src2->data,
                               (int32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src1->data,
                               (int32_t *)src2->data,
                               (int32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src1->data,
                               (int32_t *)src2->data,
                               (int32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src1->data,
                               (int32_t *)src2->data,
                               (int32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            /* integer POW saturates to the dtype's range */
            hipLaunchKernelGGL(( pow_int_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src1->data,
                               (int32_t *)src2->data,
                               (int32_t *)dst->data,
                               INT32_MAX,
                               INT32_MIN,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_INT16:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src1->data,
                               (int16_t *)src2->data,
                               (int16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src1->data,
                               (int16_t *)src2->data,
                               (int16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src1->data,
                               (int16_t *)src2->data,
                               (int16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src1->data,
                               (int16_t *)src2->data,
                               (int16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src1->data,
                               (int16_t *)src2->data,
                               (int16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src1->data,
                               (int16_t *)src2->data,
                               (int16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            hipLaunchKernelGGL(( pow_int_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src1->data,
                               (int16_t *)src2->data,
                               (int16_t *)dst->data,
                               INT16_MAX,
                               INT16_MIN,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_INT8:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src1->data,
                               (int8_t *)src2->data,
                               (int8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src1->data,
                               (int8_t *)src2->data,
                               (int8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src1->data,
                               (int8_t *)src2->data,
                               (int8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src1->data,
                               (int8_t *)src2->data,
                               (int8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src1->data,
                               (int8_t *)src2->data,
                               (int8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src1->data,
                               (int8_t *)src2->data,
                               (int8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            hipLaunchKernelGGL(( pow_int_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src1->data,
                               (int8_t *)src2->data,
                               (int8_t *)dst->data,
                               INT8_MAX,
                               INT8_MIN,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_UINT32:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src1->data,
                               (uint32_t *)src2->data,
                               (uint32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src1->data,
                               (uint32_t *)src2->data,
                               (uint32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src1->data,
                               (uint32_t *)src2->data,
                               (uint32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src1->data,
                               (uint32_t *)src2->data,
                               (uint32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src1->data,
                               (uint32_t *)src2->data,
                               (uint32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src1->data,
                               (uint32_t *)src2->data,
                               (uint32_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            /* unsigned POW saturates to [0, UINT32_MAX] */
            hipLaunchKernelGGL(( pow_int_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src1->data,
                               (uint32_t *)src2->data,
                               (uint32_t *)dst->data,
                               UINT32_MAX,
                               0,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_UINT16:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src1->data,
                               (uint16_t *)src2->data,
                               (uint16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src1->data,
                               (uint16_t *)src2->data,
                               (uint16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src1->data,
                               (uint16_t *)src2->data,
                               (uint16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src1->data,
                               (uint16_t *)src2->data,
                               (uint16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src1->data,
                               (uint16_t *)src2->data,
                               (uint16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src1->data,
                               (uint16_t *)src2->data,
                               (uint16_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            hipLaunchKernelGGL(( pow_int_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src1->data,
                               (uint16_t *)src2->data,
                               (uint16_t *)dst->data,
                               UINT16_MAX,
                               0,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_UINT8:
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src1->data,
                               (uint8_t *)src2->data,
                               (uint8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src1->data,
                               (uint8_t *)src2->data,
                               (uint8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src1->data,
                               (uint8_t *)src2->data,
                               (uint8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src1->data,
                               (uint8_t *)src2->data,
                               (uint8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src1->data,
                               (uint8_t *)src2->data,
                               (uint8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src1->data,
                               (uint8_t *)src2->data,
                               (uint8_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            hipLaunchKernelGGL(( pow_int_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src1->data,
                               (uint8_t *)src2->data,
                               (uint8_t *)dst->data,
                               UINT8_MAX,
                               0,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    case TL_BOOL:
        /* booleans use the *_bool_kernel variants which clamp to TRUE/FALSE */
        switch (elew_op) {
        case TL_MUL:
            hipLaunchKernelGGL(( mul_bool_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src1->data,
                               (tl_bool_t *)src2->data,
                               (tl_bool_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_DIV:
            hipLaunchKernelGGL(( div_bool_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src1->data,
                               (tl_bool_t *)src2->data,
                               (tl_bool_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUM:
            hipLaunchKernelGGL(( sum_bool_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src1->data,
                               (tl_bool_t *)src2->data,
                               (tl_bool_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_SUB:
            hipLaunchKernelGGL(( sub_bool_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src1->data,
                               (tl_bool_t *)src2->data,
                               (tl_bool_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MAX:
            hipLaunchKernelGGL(( max_kernel<tl_bool_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src1->data,
                               (tl_bool_t *)src2->data,
                               (tl_bool_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_MIN:
            hipLaunchKernelGGL(( min_kernel<tl_bool_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src1->data,
                               (tl_bool_t *)src2->data,
                               (tl_bool_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        case TL_POW:
            hipLaunchKernelGGL(( pow_bool_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src1->data,
                               (tl_bool_t *)src2->data,
                               (tl_bool_t *)dst->data,
                               BLOCK_SIZE,
                               thread_num);
            break;
        default:
            assert(0 && "unsopported tl_elew_op");
            break;
        }
        break;
    default:
        assert(0 && "unsupported tl_dtype");
        break;
    }
    tl_cuda_device_sync();
    return dst;
}
static __global__ void convert_kernel(void *src, void *dst,
tl_dtype dtype_s, tl_dtype dtype_d,
int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
double val_d;
float val_f;
int32_t val_i32;
uint32_t val_u32;
int16_t val_i16;
uint16_t val_u16;
int8_t val_i8;
uint8_t val_u8;
switch (dtype_d) {
case TL_DOUBLE:
switch (dtype_s) {
case TL_DOUBLE:
((double *)dst)[di] = ((double *)src)[di];
break;
case TL_FLOAT:
((double *)dst)[di] = (double)((float *)src)[di];
break;
case TL_INT32:
((double *)dst)[di] = (double)((int32_t *)src)[di];
break;
case TL_INT16:
((double *)dst)[di] = (double)((int16_t *)src)[di];
break;
case TL_INT8:
((double *)dst)[di] = (double)((int8_t *)src)[di];
break;
case TL_UINT32:
((double *)dst)[di] = (double)((uint32_t *)src)[di];
break;
case TL_UINT16:
((double *)dst)[di] = (double)((uint16_t *)src)[di];
break;
case TL_UINT8:
((double *)dst)[di] = (double)((uint8_t *)src)[di];
break;
case TL_BOOL:
((double *)dst)[di] = (double)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_FLOAT:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= FLT_MAX)
((float *)dst)[di] = FLT_MAX;
else if (val_d <= -FLT_MAX)
((float *)dst)[di] = -FLT_MAX;
else
((float *)dst)[di] = (float)val_d;
break;
case TL_FLOAT:
((float *)dst)[di] = ((float *)src)[di];
break;
case TL_INT32:
((float *)dst)[di] = (float)((int32_t *)src)[di];
break;
case TL_INT16:
((float *)dst)[di] = (float)((int16_t *)src)[di];
break;
case TL_INT8:
((float *)dst)[di] = (float)((int8_t *)src)[di];
break;
case TL_UINT32:
((float *)dst)[di] = (float)((uint32_t *)src)[di];
break;
case TL_UINT16:
((float *)dst)[di] = (float)((uint16_t *)src)[di];
break;
case TL_UINT8:
((float *)dst)[di] = (float)((uint8_t *)src)[di];
break;
case TL_BOOL:
((float *)dst)[di] = (float)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= INT32_MAX)
((int32_t *)dst)[di] = INT32_MAX;
else if (val_d <= INT32_MIN)
((int32_t *)dst)[di] = INT32_MIN;
else
((int32_t *)dst)[di] = (int32_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= INT32_MAX)
((int32_t *)dst)[di] = INT32_MAX;
else if (val_f <= INT32_MIN)
((int32_t *)dst)[di] = INT32_MIN;
else
((int32_t *)dst)[di] = (int32_t)val_f;
break;
case TL_INT32:
((int32_t *)dst)[di] = ((int32_t *)src)[di];
break;
case TL_INT16:
((int32_t *)dst)[di] = (int32_t)((int16_t *)src)[di];
break;
case TL_INT8:
((int32_t *)dst)[di] = (int32_t)((int8_t *)src)[di];
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= INT32_MAX)
((int32_t *)dst)[di] = INT32_MAX;
else
((int32_t *)dst)[di] = (int32_t)val_u32;
break;
case TL_UINT16:
((int32_t *)dst)[di] = (int32_t)((uint16_t *)src)[di];
break;
case TL_UINT8:
((int32_t *)dst)[di] = (int32_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((int32_t *)dst)[di] = (int32_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else if (val_d <= INT16_MIN)
((int16_t *)dst)[di] = INT16_MIN;
else
((int16_t *)dst)[di] = (int16_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else if (val_f <= INT16_MIN)
((int16_t *)dst)[di] = INT16_MIN;
else
((int16_t *)dst)[di] = (int16_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else if (val_i32 <= INT16_MIN)
((int16_t *)dst)[di] = INT16_MIN;
else
((int16_t *)dst)[di] = (int16_t)val_i32;
break;
case TL_INT16:
((int16_t *)dst)[di] = ((int16_t *)src)[di];
break;
case TL_INT8:
((int16_t *)dst)[di] = (int16_t)((int8_t *)src)[di];
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else
((int16_t *)dst)[di] = (int16_t)val_u32;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16 >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else
((int16_t *)dst)[di] = (int16_t)val_u16;
break;
case TL_UINT8:
((int16_t *)dst)[di] = (int16_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((int16_t *)dst)[di] = (int16_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_d <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_f <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_i32 <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_i32;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_i16 <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_i16;
break;
case TL_INT8:
((int8_t *)dst)[di] = ((int8_t *)src)[di];
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else
((int8_t *)dst)[di] = (int8_t)val_u32;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else
((int8_t *)dst)[di] = (int8_t)val_u16;
break;
case TL_UINT8:
val_u8 = ((uint8_t *)src)[di];
if (val_u8 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else
((int8_t *)dst)[di] = (int8_t)val_u8;
break;
case TL_BOOL:
((int8_t *)dst)[di] = (int8_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= UINT32_MAX)
((uint32_t *)dst)[di] = UINT32_MAX;
else if (val_d < 0)
((uint32_t *)dst)[di] = 0;
else
((uint32_t *)dst)[di] = (uint32_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= UINT32_MAX)
((uint32_t *)dst)[di] = UINT32_MAX;
else if (val_f < 0)
((uint32_t *)dst)[di] = 0;
else
((uint32_t *)dst)[di] = (uint32_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= 0)
((uint32_t *)dst)[di] = (uint32_t)val_i32;
else
((uint32_t *)dst)[di] = 0;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= 0)
((uint32_t *)dst)[di] = (uint32_t)val_i16;
else
((uint32_t *)dst)[di] = 0;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8 >= 0)
((uint32_t *)dst)[di] = (uint32_t)val_i8;
else
((uint32_t *)dst)[di] = 0;
break;
case TL_UINT32:
((uint32_t *)dst)[di] = ((uint32_t *)src)[di];
break;
case TL_UINT16:
((uint32_t *)dst)[di] = (uint32_t)((uint16_t *)src)[di];
break;
case TL_UINT8:
((uint32_t *)dst)[di] = (uint32_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((uint32_t *)dst)[di] = (uint32_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else if (val_d < 0)
((uint16_t *)dst)[di] = 0;
else
((uint16_t *)dst)[di] = (uint16_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else if (val_f < 0)
((uint16_t *)dst)[di] = 0;
else
((uint16_t *)dst)[di] = (uint16_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else if (val_i32 < 0)
((uint16_t *)dst)[di] = 0;
else
((uint16_t *)dst)[di] = (uint16_t)val_i32;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= 0)
((uint16_t *)dst)[di] = (uint16_t)val_i16;
else
((uint16_t *)dst)[di] = 0;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8 >= 0)
((uint16_t *)dst)[di] = (uint16_t)val_i8;
else
((uint16_t *)dst)[di] = 0;
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else
((uint16_t *)dst)[di] = (uint16_t)val_u32;
break;
case TL_UINT16:
((uint16_t *)dst)[di] = ((uint16_t *)src)[di];
break;
case TL_UINT8:
((uint16_t *)dst)[di] = (uint16_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((uint16_t *)dst)[di] = (uint16_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_d < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_f < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_i32 < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_i32;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_i16 < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_i16;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8 >= 0)
((uint8_t *)dst)[di] = (uint8_t)val_i8;
else
((uint8_t *)dst)[di] = 0;
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else
((uint8_t *)dst)[di] = (uint8_t)val_u32;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else
((uint8_t *)dst)[di] = (uint8_t)val_u16;
break;
case TL_UINT8:
((uint8_t *)dst)[di] = ((uint8_t *)src)[di];
break;
case TL_BOOL:
((uint8_t *)dst)[di] = (uint8_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_BOOL:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d > 0 || val_d < 0)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f > 0 || val_f < 0)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_UINT8:
val_u8 = ((uint8_t *)src)[di];
if (val_u8)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_BOOL:
((tl_bool_t *)dst)[di] = ((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
}
tl_tensor *tl_tensor_convert_cuda(const tl_tensor *src, tl_tensor *dst,
tl_dtype dtype_d)
{
tl_dtype dtype_s;
int thread_num, block_num;
assert(src && tl_is_device_mem(src->data));
if (dst) {
assert(tl_is_device_mem(dst->data));
assert(tl_tensor_issameshape(src, dst));
assert(dst->dtype == dtype_d);
} else {
dst = tl_tensor_zeros_cuda(src->ndim, src->dims, dtype_d);
}
dtype_s = src->dtype;
thread_num = dst->len;
block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
hipLaunchKernelGGL(( convert_kernel), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, src->data, dst->data,
dtype_s, dtype_d,
BLOCK_SIZE, thread_num);
tl_cuda_device_sync();
return dst;
}
template <typename T>
static __global__ void transpose_kernel(T *src, T *dst, int ndim,
int *s_dims, int *d_dims,
int *axes, int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
int s_ids[TL_MAXDIM], d_ids[TL_MAXDIM];
get_coords(di, d_ids, ndim, d_dims);
for (int i = 0; i < ndim; i++)
s_ids[axes[i]] = d_ids[i];
int si = get_index(s_ids, ndim, s_dims);
dst[di] = src[si];
}
tl_tensor *tl_tensor_transpose_cuda(const tl_tensor *src, tl_tensor *dst,
const int *axes)
{
int i;
#ifndef NDEBUG
int tmp[TL_MAXDIM] = {0};
for (i = 0; i < src->ndim; i++)
tmp[axes[i]] = 1;
for (i = 0; i < src->ndim; i++)
assert(tmp[i] && "axes don't match src tensor's shape");
assert(src && tl_is_device_mem(src->data));
#endif
if (dst) {
#ifndef NDEBUG
assert(tl_is_device_mem(dst->data));
assert(src->dtype == dst->dtype);
assert(src->len == dst->len);
assert(src->ndim == dst->ndim);
for (i = 0; i < dst->ndim; i++)
assert(src->dims[axes[i]] = dst->dims[i]);
#endif
} else {
int d_dims[TL_MAXDIM];
for (i = 0; i < src->ndim; i++)
d_dims[i] = src->dims[axes[i]];
dst = tl_tensor_zeros_cuda(src->ndim, d_dims, src->dtype);
}
int *axes_device;
int *s_dims, *d_dims;
int thread_num, block_num;
thread_num = dst->len;
block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
s_dims = (int *)tl_clone_h2d(src->dims, sizeof(int) * src->ndim);
d_dims = (int *)tl_clone_h2d(dst->dims, sizeof(int) * dst->ndim);
axes_device = (int *)tl_clone_h2d(axes, sizeof(int) * src->ndim);
switch (src->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( transpose_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double *)src->data,
(double *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( transpose_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float *)src->data,
(float *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( transpose_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t *)src->data,
(int32_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( transpose_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t *)src->data,
(int16_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( transpose_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t *)src->data,
(int8_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( transpose_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t *)src->data,
(uint32_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( transpose_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t *)src->data,
(uint16_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_UINT8:
hipLaunchKernelGGL(( transpose_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t *)src->data,
(uint8_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_BOOL:
hipLaunchKernelGGL(( transpose_kernel<tl_bool_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t *)src->data,
(tl_bool_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
tl_cuda_device_sync();
tl_free_cuda(s_dims);
tl_free_cuda(d_dims);
tl_free_cuda(axes_device);
return dst;
}
template <typename T>
static __global__ void nearest_resize_kernel(const T *src, T *dst, int ndim,
const int *dims, const int *new_dims,
int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
__shared__ float scales[TL_MAXDIM];
if (di < ndim)
scales[di] = (float)dims[di] / (float)new_dims[di];
if (di > total)
return;
int si;
float rounded;
int src_coords[TL_MAXDIM];
int dst_coords[TL_MAXDIM];
get_coords(di, dst_coords, ndim, new_dims);
for (int i = 0; i < ndim; i++) {
rounded = roundf(((float)dst_coords[i] + 0.5) * scales[i] - 0.5);
convert_device(&src_coords[i], TL_INT32, &rounded, TL_FLOAT);
}
si = get_index(src_coords, ndim, dims);
dst[di] = src[si];
}
tl_tensor *tl_tensor_resize_cuda(const tl_tensor *src, tl_tensor *dst,
const int *new_dims, tl_resize_type rtype)
{
assert(src && src->data);
assert(new_dims);
tl_check_resize_type(rtype);
if (dst) {
assert(dst->data);
assert(dst->dtype == src->dtype);
assert(dst->ndim == src->ndim);
} else {
dst = tl_tensor_zeros_cuda(src->ndim, new_dims, src->dtype);
}
int block_num, thread_num;
int *dims_cuda, *new_dims_cuda;
dims_cuda = (int *)tl_clone_h2d(src->dims, sizeof(int)*src->ndim);
new_dims_cuda = (int *)tl_clone_h2d(new_dims, sizeof(int)*src->ndim);
thread_num = dst->len;
block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
switch (rtype) {
case TL_NEAREST:
switch (src->dtype) {
case TL_DOUBLE:
hipLaunchKernelGGL(( nearest_resize_kernel<double>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (double*)src->data, (double*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
hipLaunchKernelGGL(( nearest_resize_kernel<float>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (float*)src->data, (float*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_INT32:
hipLaunchKernelGGL(( nearest_resize_kernel<int32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int32_t*)src->data, (int32_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_INT16:
hipLaunchKernelGGL(( nearest_resize_kernel<int16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int16_t*)src->data, (int16_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_INT8:
hipLaunchKernelGGL(( nearest_resize_kernel<int8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (int8_t*)src->data, (int8_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
hipLaunchKernelGGL(( nearest_resize_kernel<uint32_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint32_t*)src->data, (uint32_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
hipLaunchKernelGGL(( nearest_resize_kernel<uint16_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint16_t*)src->data, (uint16_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
case TL_UINT8:
hipLaunchKernelGGL(( nearest_resize_kernel<uint8_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (uint8_t*)src->data, (uint8_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_BOOL:
hipLaunchKernelGGL(( nearest_resize_kernel<tl_bool_t>), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, (tl_bool_t*)src->data, (tl_bool_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_LINEAR:
assert(0 && "not support TL_LINEAR yet");
break;
default:
assert(0 && "unsupported tl_resize_type");
break;
}
tl_cuda_device_sync();
tl_free_cuda(dims_cuda);
tl_free_cuda(new_dims_cuda);
return dst;
}
| b301289f089152e7728688e928717c52e3502c1e.cu | /*
* Copyright (c) 2018 Zhao Zhixu
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <string.h>
#include <assert.h>
#include <stdarg.h>
#include <math.h>
#include "tl_tensor.h"
#include "tl_util.h"
#define BLOCK_SIZE 512
#define BLOCK_NUM(bs, tn) (((tn) + (bs) - 1) / (bs))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
static inline __device__ int get_index(const int *ids, int ndim, const int *dims)
{
int i, id;
for (i = 0, id = ids[0]; i < ndim-1; i++)
id = dims[i+1] * id + ids[i+1];
return id;
}
static inline __device__ void get_coords(int id, int *ids, int ndim, const int *dims)
{
for (int i = ndim-1; i >= 0; i--) {
ids[i] = id % dims[i];
id /= dims[i];
}
}
static __device__ void convert_device(void *pd, tl_dtype dtype_d,
const void *ps, tl_dtype dtype_s)
{
tl_check_dtype(dtype_d);
tl_check_dtype(dtype_s);
double val_d;
float val_f;
int32_t val_i32;
uint32_t val_u32;
int16_t val_i16;
uint16_t val_u16;
int8_t val_i8;
uint8_t val_u8;
switch (dtype_d) {
case TL_DOUBLE:
switch (dtype_s) {
case TL_DOUBLE:
*(double *)pd = *(double *)ps;
break;
case TL_FLOAT:
*(double *)pd = (double)*(float *)ps;
break;
case TL_INT32:
*(double *)pd = (double)*(int32_t *)ps;
break;
case TL_INT16:
*(double *)pd = (double)*(int16_t *)ps;
break;
case TL_INT8:
*(double *)pd = (double)*(int8_t *)ps;
break;
case TL_UINT32:
*(double *)pd = (double)*(uint32_t *)ps;
break;
case TL_UINT16:
*(double *)pd = (double)*(uint16_t *)ps;
break;
case TL_UINT8:
*(double *)pd = (double)*(uint8_t *)ps;
break;
case TL_BOOL:
*(double *)pd = (double)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_FLOAT:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= FLT_MAX)
*(float *)pd = FLT_MAX;
else if (val_d <= -FLT_MAX)
*(float *)pd = -FLT_MAX;
else
*(float *)pd = (float)val_d;
break;
case TL_FLOAT:
*(float *)pd = *(float *)ps;
break;
case TL_INT32:
*(float *)pd = (float)*(int32_t *)ps;
break;
case TL_INT16:
*(float *)pd = (float)*(int16_t *)ps;
break;
case TL_INT8:
*(float *)pd = (float)*(int8_t *)ps;
break;
case TL_UINT32:
*(float *)pd = (float)*(uint32_t *)ps;
break;
case TL_UINT16:
*(float *)pd = (float)*(uint16_t *)ps;
break;
case TL_UINT8:
*(float *)pd = (float)*(uint8_t *)ps;
break;
case TL_BOOL:
*(float *)pd = (float)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= INT32_MAX)
*(int32_t *)pd = INT32_MAX;
else if (val_d <= INT32_MIN)
*(int32_t *)pd = INT32_MIN;
else
*(int32_t *)pd = (int32_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= INT32_MAX)
*(int32_t *)pd = INT32_MAX;
else if (val_f <= INT32_MIN)
*(int32_t *)pd = INT32_MIN;
else
*(int32_t *)pd = (int32_t)val_f;
break;
case TL_INT32:
*(int32_t *)pd = *(int32_t *)ps;
break;
case TL_INT16:
*(int32_t *)pd = (int32_t)*(int16_t *)ps;
break;
case TL_INT8:
*(int32_t *)pd = (int32_t)*(int8_t *)ps;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= INT32_MAX)
*(int32_t *)pd = INT32_MAX;
else
*(int32_t *)pd = (int32_t)val_u32;
break;
case TL_UINT16:
/* printf("*ps = %d\n", *(uint16_t *)ps); */
*(int32_t *)pd = (int32_t)*(uint16_t *)ps;
/* printf("*pd = %d\n", *(int32_t *)pd); */
break;
case TL_UINT8:
*(int32_t *)pd = (int32_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(int32_t *)pd = (int32_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else if (val_d <= INT16_MIN)
*(int16_t *)pd = INT16_MIN;
else
*(int16_t *)pd = (int16_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else if (val_f <= INT16_MIN)
*(int16_t *)pd = INT16_MIN;
else
*(int16_t *)pd = (int16_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else if (val_i32 <= INT16_MIN)
*(int16_t *)pd = INT16_MIN;
else
*(int16_t *)pd = (int16_t)val_i32;
break;
case TL_INT16:
*(int16_t *)pd = *(int16_t *)ps;
break;
case TL_INT8:
*(int16_t *)pd = (int16_t)*(int8_t *)ps;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else
*(int16_t *)pd = (int16_t)val_u32;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16 >= INT16_MAX)
*(int16_t *)pd = INT16_MAX;
else
*(int16_t *)pd = (int16_t)val_u16;
break;
case TL_UINT8:
*(int16_t *)pd = (int16_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(int16_t *)pd = (int16_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_d <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_f <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_i32 <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_i32;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else if (val_i16 <= INT8_MIN)
*(int8_t *)pd = INT8_MIN;
else
*(int8_t *)pd = (int8_t)val_i16;
break;
case TL_INT8:
*(int8_t *)pd = *(int8_t *)ps;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else
*(int8_t *)pd = (int8_t)val_u32;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else
*(int8_t *)pd = (int8_t)val_u16;
break;
case TL_UINT8:
val_u8 = *(uint8_t *)ps;
if (val_u8 >= INT8_MAX)
*(int8_t *)pd = INT8_MAX;
else
*(int8_t *)pd = (int8_t)val_u8;
break;
case TL_BOOL:
*(int8_t *)pd = (int8_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= UINT32_MAX)
*(uint32_t *)pd = UINT32_MAX;
else if (val_d < 0)
*(uint32_t *)pd = 0;
else
*(uint32_t *)pd = (uint32_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= UINT32_MAX)
*(uint32_t *)pd = UINT32_MAX;
else if (val_f < 0)
*(uint32_t *)pd = 0;
else
*(uint32_t *)pd = (uint32_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= 0)
*(uint32_t *)pd = (uint32_t)val_i32;
else
*(uint32_t *)pd = 0;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= 0)
*(uint32_t *)pd = (uint32_t)val_i16;
else
*(uint32_t *)pd = 0;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8 >= 0)
*(uint32_t *)pd = (uint32_t)val_i8;
else
*(uint32_t *)pd = 0;
break;
case TL_UINT32:
*(uint32_t *)pd = *(uint32_t *)ps;
break;
case TL_UINT16:
*(uint32_t *)pd = (uint32_t)*(uint16_t *)ps;
break;
case TL_UINT8:
*(uint32_t *)pd = (uint32_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(uint32_t *)pd = (uint32_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else if (val_d < 0)
*(uint16_t *)pd = 0;
else
*(uint16_t *)pd = (uint16_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else if (val_f < 0)
*(uint16_t *)pd = 0;
else
*(uint16_t *)pd = (uint16_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else if (val_i32 < 0)
*(uint16_t *)pd = 0;
else
*(uint16_t *)pd = (uint16_t)val_i32;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= 0)
*(uint16_t *)pd = (uint16_t)val_i16;
else
*(uint16_t *)pd = 0;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8 >= 0)
*(uint16_t *)pd = (uint16_t)val_i8;
else
*(uint16_t *)pd = 0;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= UINT16_MAX)
*(uint16_t *)pd = UINT16_MAX;
else
*(uint16_t *)pd = (uint16_t)val_u32;
break;
case TL_UINT16:
*(uint16_t *)pd = *(uint16_t *)ps;
break;
case TL_UINT8:
*(uint16_t *)pd = (uint16_t)*(uint8_t *)ps;
break;
case TL_BOOL:
*(uint16_t *)pd = (uint16_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_d < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_d;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_f < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_f;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_i32 < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_i32;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else if (val_i16 < 0)
*(uint8_t *)pd = 0;
else
*(uint8_t *)pd = (uint8_t)val_i16;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8 >= 0)
*(uint8_t *)pd = (uint8_t)val_i8;
else
*(uint8_t *)pd = 0;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else
*(uint8_t *)pd = (uint8_t)val_u32;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16 >= UINT8_MAX)
*(uint8_t *)pd = UINT8_MAX;
else
*(uint8_t *)pd = (uint8_t)val_u16;
break;
case TL_UINT8:
*(uint8_t *)pd = *(uint8_t *)ps;
break;
case TL_BOOL:
*(uint8_t *)pd = (uint8_t)*(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_BOOL:
switch (dtype_s) {
case TL_DOUBLE:
val_d = *(double *)ps;
if (val_d > 0 || val_d < 0)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_FLOAT:
val_f = *(float *)ps;
if (val_f > 0 || val_f < 0)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_INT32:
val_i32 = *(int32_t *)ps;
if (val_i32)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_INT16:
val_i16 = *(int16_t *)ps;
if (val_i16)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_INT8:
val_i8 = *(int8_t *)ps;
if (val_i8)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_UINT32:
val_u32 = *(uint32_t *)ps;
if (val_u32)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_UINT16:
val_u16 = *(uint16_t *)ps;
if (val_u16)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_UINT8:
val_u8 = *(uint8_t *)ps;
if (val_u8)
*(tl_bool_t *)pd = TL_TRUE;
else
*(tl_bool_t *)pd = TL_FALSE;
break;
case TL_BOOL:
*(tl_bool_t *)pd = *(tl_bool_t *)ps;
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
}
void tl_tensor_free_data_too_cuda(tl_tensor *t)
{
if (!t)
return;
tl_free_cuda(t->data);
tl_tensor_free(t);
}
tl_tensor *tl_tensor_zeros_cuda(int ndim, const int *dims, tl_dtype dtype)
{
tl_tensor *t;
size_t size;
t = tl_tensor_create(NULL, ndim, dims, dtype);
t->owner = t;
size = t->len * tl_size_of(dtype);
t->data = tl_alloc_cuda(size);
tl_memset_cuda(t->data, 0, size);
return t;
}
/* Deep-copy a host tensor into a new device tensor (data copied host->device).
   The returned tensor owns its device buffer. */
tl_tensor *tl_tensor_clone_h2d(const tl_tensor *src)
{
    assert(src);
    size_t nbytes = src->len * tl_size_of(src->dtype);
    void *dev_data = tl_clone_h2d(src->data, nbytes);
    tl_tensor *dst = tl_tensor_create(dev_data, src->ndim, src->dims, src->dtype);
    dst->owner = dst;
    return dst;
}
/* Deep-copy a device tensor into a new host tensor (data copied device->host).
   The returned tensor owns its host buffer. */
tl_tensor *tl_tensor_clone_d2h(const tl_tensor *src)
{
    assert(src);
    size_t nbytes = src->len * tl_size_of(src->dtype);
    void *host_data = tl_clone_d2h(src->data, nbytes);
    tl_tensor *dst = tl_tensor_create(host_data, src->ndim, src->dims, src->dtype);
    dst->owner = dst;
    return dst;
}
/* Deep-copy a device tensor into another new device tensor
   (data copied device->device). The returned tensor owns its buffer. */
tl_tensor *tl_tensor_clone_d2d(const tl_tensor *src)
{
    assert(src);
    size_t nbytes = src->len * tl_size_of(src->dtype);
    void *dev_data = tl_clone_d2d(src->data, nbytes);
    tl_tensor *dst = tl_tensor_create(dev_data, src->ndim, src->dims, src->dtype);
    dst->owner = dst;
    return dst;
}
/* Tile a host tensor `times` times into a new device tensor. The result has
   one extra leading dimension of size `times`; the remaining dimensions are
   src's dimensions. The returned tensor owns its device buffer. */
tl_tensor *tl_tensor_repeat_h2d(const tl_tensor *src, int times)
{
    assert(src);
    void *data = tl_repeat_h2d(src->data, src->len * tl_size_of(src->dtype),
                               times);
    /* build the new shape: [times, src->dims...] */
    int *new_dims = (int *)tl_alloc(sizeof(int) * (src->ndim + 1));
    new_dims[0] = times;
    memmove(new_dims + 1, src->dims, sizeof(int) * src->ndim);
    tl_tensor *dst = tl_tensor_create(data, src->ndim + 1, new_dims, src->dtype);
    dst->owner = dst;
    tl_free(new_dims);
    return dst;
}
/* Tile a device tensor `times` times into a new host tensor. The result has
   one extra leading dimension of size `times`; the remaining dimensions are
   src's dimensions. The returned tensor owns its host buffer. */
tl_tensor *tl_tensor_repeat_d2h(const tl_tensor *src, int times)
{
    assert(src);
    void *data = tl_repeat_d2h(src->data, src->len * tl_size_of(src->dtype),
                               times);
    /* build the new shape: [times, src->dims...] */
    int *new_dims = (int *)tl_alloc(sizeof(int) * (src->ndim + 1));
    new_dims[0] = times;
    memmove(new_dims + 1, src->dims, sizeof(int) * src->ndim);
    tl_tensor *dst = tl_tensor_create(data, src->ndim + 1, new_dims, src->dtype);
    dst->owner = dst;
    tl_free(new_dims);
    return dst;
}
/* Tile a device tensor `times` times into another new device tensor. The
   result has one extra leading dimension of size `times`; the remaining
   dimensions are src's dimensions. The returned tensor owns its buffer. */
tl_tensor *tl_tensor_repeat_d2d(const tl_tensor *src, int times)
{
    assert(src);
    void *data = tl_repeat_d2d(src->data, src->len * tl_size_of(src->dtype),
                               times);
    /* build the new shape: [times, src->dims...] */
    int *new_dims = (int *)tl_alloc(sizeof(int) * (src->ndim + 1));
    new_dims[0] = times;
    memmove(new_dims + 1, src->dims, sizeof(int) * src->ndim);
    tl_tensor *dst = tl_tensor_create(data, src->ndim + 1, new_dims, src->dtype);
    dst->owner = dst;
    tl_free(new_dims);
    return dst;
}
/* Create a 1-D device tensor holding [start, stop) with the given step.
   Elements are computed on the host, converted to `dtype`, then copied to
   the device in one transfer. Returns NULL if the resulting length would
   exceed INT32_MAX.
   Fixes vs. original:
   - the host staging buffer returned by tl_tensor_zeros is a tl_tensor *,
     not a raw element buffer; the original passed the struct pointer itself
     to tl_padd, overwriting the struct instead of filling its data;
   - tl_memcpy_h2d copied only tl_size_of(dst->dtype) bytes (one element)
     instead of the whole buffer;
   - the staging tensor was leaked. */
tl_tensor *tl_tensor_arange_cuda(double start, double stop, double step,
                                 tl_dtype dtype)
{
    int dims[1];
    tl_tensor *dst, *staging;
    double len, elem;
    size_t dsize;

    dsize = tl_size_of(dtype);
    assert(start >= tl_dtype_min(dtype) && start <= tl_dtype_max(dtype));
    assert(stop >= tl_dtype_min(dtype) && stop <= tl_dtype_max(dtype));
    assert(step >= tl_dtype_min(dtype) && step <= tl_dtype_max(dtype));
    assert(step != 0);
    assert(stop > start); /* TODO: expand to all possibilities */

    len = ceil((stop - start) / step);
    if (len > INT32_MAX)
        return NULL;
    dims[0] = (int)len;
    dst = tl_tensor_zeros_cuda(1, dims, dtype);

    /* fill a host-side staging tensor, then copy the whole buffer over */
    staging = tl_tensor_zeros(1, dims, dtype);
    for (int i = 0; i < dims[0]; i++) {
        elem = start + step * i;
        tl_convert(tl_padd(staging->data, i, dsize), dtype, &elem, TL_DOUBLE);
    }
    tl_memcpy_h2d(dst->data, staging->data, dst->len * tl_size_of(dst->dtype));
    tl_tensor_free_data_too(staging);
    return dst;
}
/* Print a device tensor to `stream` with element format `fmt` by staging a
   temporary host copy, printing it, and freeing the copy. */
void tl_tensor_fprint_cuda(FILE *stream, const tl_tensor *t, const char *fmt)
{
    tl_tensor *host_copy = tl_tensor_clone_d2h(t);
    tl_tensor_fprint(stream, host_copy, fmt);
    tl_tensor_free_data_too(host_copy);
}
/* Print a device tensor to stdout with element format `fmt`.
   Convenience wrapper around tl_tensor_fprint_cuda. */
void tl_tensor_print_cuda(const tl_tensor *t, const char *fmt)
{
tl_tensor_fprint_cuda(stdout, t, fmt);
}
/* Save a device tensor to `file_name` with element format `fmt` by staging
   a host copy. Returns tl_tensor_save's status code. */
int tl_tensor_save_cuda(const char *file_name, const tl_tensor *t,
                        const char *fmt)
{
    tl_tensor *host_copy = tl_tensor_clone_d2h(t);
    int status = tl_tensor_save(file_name, host_copy, fmt);
    tl_tensor_free_data_too(host_copy);
    return status;
}
/* Allocate a zeroed device tensor shaped like `src` except that dimension
   `axis` has size `len` (the shape a slice of that length would have). */
tl_tensor *tl_tensor_zeros_slice_cuda(const tl_tensor *src, int axis, int len,
                                      tl_dtype dtype)
{
    assert(src);
    assert(axis < src->ndim && axis >= 0);
    assert(len <= src->dims[axis] && len > 0);
    int *sliced_dims = (int *)tl_clone(src->dims, sizeof(int) * src->ndim);
    sliced_dims[axis] = len;
    tl_tensor *dst = tl_tensor_zeros_cuda(src->ndim, sliced_dims, dtype);
    tl_free(sliced_dims);
    return dst;
}
/* Copy one destination element of a slice along some axis.
   d_vol/s_vol are the dst/src slab volumes at the sliced axis, vol is the
   volume below it, and start is the slice offset along the axis. One thread
   per destination element; threads past `total` do nothing. */
template <typename T>
static __global__ void slice_kernel(T *src, T *dst, int start, int s_vol,
                                    int d_vol, int vol, int block_size, int total)
{
    int di = blockIdx.x * block_size + threadIdx.x;
    if (di < total) {
        /* map the flat dst index back into the (larger) src layout */
        int si = di / d_vol * s_vol + di % d_vol + start * vol;
        dst[di] = src[si];
    }
}
/* Slice `len` elements starting at `start` along dimension `axis` of the
   device tensor `src` into `dst`. If dst is NULL a new zeroed device tensor
   of the sliced shape is allocated; otherwise dst must already have the
   sliced shape and src's dtype. Synchronizes the device before returning. */
tl_tensor *tl_tensor_slice_cuda(const tl_tensor *src, tl_tensor *dst, int axis,
                                int start, int len)
{
    int i;
    int d_vol, s_vol, vol;
    int thread_num, block_num;
    assert(src && tl_is_device_mem(src->data));
    assert(axis < src->ndim && axis >= 0);
    assert(len <= src->dims[axis] && len > 0);
    assert(start < src->dims[axis] && start >= 0);
    assert(len + start <= src->dims[axis]);
    if (dst) {
#ifndef NDEBUG
        assert(tl_is_device_mem(dst->data));
        assert(src->dtype == dst->dtype);
        assert(dst->ndim == src->ndim);
        for (i = 0; i < src->ndim; i++)
            assert(i == axis ? dst->dims[i] == len :
                               dst->dims[i] == src->dims[i]);
#endif
    } else {
        dst = tl_tensor_zeros_slice_cuda(src, axis, len, src->dtype);
    }
    /* vol: volume below the sliced axis; d_vol/s_vol: slab volumes at it */
    for (i = axis + 1, vol = 1; i < dst->ndim; i++)
        vol *= dst->dims[i];
    d_vol = vol * dst->dims[axis];
    s_vol = vol * src->dims[axis];
    thread_num = dst->len;
    block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);

    /* one launch per dtype; the kernel body is type-generic */
#define SLICE_LAUNCH(T)                                                      \
    slice_kernel<T><<<block_num, BLOCK_SIZE>>>((T *)src->data,               \
                                               (T *)dst->data, start, s_vol, \
                                               d_vol, vol, BLOCK_SIZE,       \
                                               thread_num)
    switch (src->dtype) {
    case TL_DOUBLE: SLICE_LAUNCH(double); break;
    case TL_FLOAT:  SLICE_LAUNCH(float); break;
    case TL_INT32:  SLICE_LAUNCH(int32_t); break;
    case TL_INT16:  SLICE_LAUNCH(int16_t); break;
    case TL_INT8:   SLICE_LAUNCH(int8_t); break;
    case TL_UINT32: SLICE_LAUNCH(uint32_t); break;
    case TL_UINT16: SLICE_LAUNCH(uint16_t); break;
    case TL_UINT8:  SLICE_LAUNCH(uint8_t); break;
    case TL_BOOL:   SLICE_LAUNCH(tl_bool_t); break;
    default:
        assert(0 && "unsupported tl_dtype");
        break;
    }
#undef SLICE_LAUNCH
    tl_cuda_device_sync();
    return dst;
}
/* Per-thread max reduction along one axis. Each thread scans dim_size
   elements spaced reduce_vol apart in src, writes the maximum to dst[di],
   and, when `arg` is non-NULL, the winning index along the reduced axis to
   arg[di]. batch_vol is the src volume spanned by one output batch. */
template <typename T>
static __global__ void maxreduce_kernel(T *src, T *dst, int32_t *arg, int dim_size,
                                        int reduce_vol, int batch_vol,
                                        int block_size, int total)
{
    int di = blockIdx.x * block_size + threadIdx.x;
    if (di >= total)
        return;
    /* first src element this thread compares:
       si = batch_vol * (di / reduce_vol) + (di - reduce_vol * (di / reduce_vol)) */
    int si = (batch_vol - reduce_vol) * (di / reduce_vol) + di;
    T best = src[si];
    int best_idx = 0;
    for (int k = 1; k < dim_size; k++) {
        T cand = src[si + k * reduce_vol];
        if (cand > best) {
            best = cand;
            best_idx = k;
        }
    }
    dst[di] = best;
    if (arg)
        arg[di] = best_idx;
}
/* Reduce device tensor `src` along `axis` by taking the maximum, writing the
   result into `dst` (allocated with dims[axis] == 1 if NULL) and, when `arg`
   is non-NULL, the argmax indices (dtype TL_INT32, same shape as dst).
   Synchronizes the device before returning.
   Fix vs. original: src was dereferenced by tl_check_dtype(src->dtype)
   BEFORE assert(src) verified it was non-NULL; the checks are reordered. */
tl_tensor *tl_tensor_maxreduce_cuda(const tl_tensor *src, tl_tensor *dst,
                                    tl_tensor *arg, int axis)
{
    /* suppose the shape of src is [N, C, H, W], axis = 1, then thread_num is
       N x H x W, reduce_vol is H x W, index_vol is C x H x W */
    int thread_num, block_num, reduce_vol, index_vol;
    void *arg_data;
    int i;
    assert(src && tl_is_device_mem(src->data));
    tl_check_dtype(src->dtype);
    assert(axis < src->ndim && axis >= 0);
    if (dst) {
#ifndef NDEBUG
        assert(tl_is_device_mem(dst->data));
        assert(src->dtype == dst->dtype);
        for (i = 0; i < dst->ndim; i++)
            assert(i == axis ? dst->dims[i] == 1 :
                               dst->dims[i] == src->dims[i]);
#endif
    } else {
        dst = tl_tensor_zeros_slice_cuda(src, axis, 1, src->dtype);
    }
    if (arg) {
#ifndef NDEBUG
        assert(tl_is_device_mem(arg->data));
        assert(arg->dtype == TL_INT32);
        for (i = 0; i < arg->ndim; i++)
            assert(i == axis ? arg->dims[i] == 1 :
                               arg->dims[i] == src->dims[i]);
#endif
        arg_data = arg->data;
    } else {
        arg_data = NULL;
    }
    for (i = axis + 1, thread_num = 1; i < dst->ndim; i++)
        thread_num *= dst->dims[i];
    reduce_vol = thread_num;
    index_vol = thread_num * src->dims[axis];
    for (i = 0; i < axis; i++)
        thread_num *= dst->dims[i];
    block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);

    /* one launch per dtype; the kernel body is type-generic */
#define MAXREDUCE_LAUNCH(T)                                                  \
    maxreduce_kernel<T><<<block_num, BLOCK_SIZE>>>((T *)src->data,           \
                                                   (T *)dst->data,           \
                                                   (int32_t *)arg_data,      \
                                                   src->dims[axis],          \
                                                   reduce_vol, index_vol,    \
                                                   BLOCK_SIZE, thread_num)
    switch (src->dtype) {
    case TL_DOUBLE: MAXREDUCE_LAUNCH(double); break;
    case TL_FLOAT:  MAXREDUCE_LAUNCH(float); break;
    case TL_INT32:  MAXREDUCE_LAUNCH(int32_t); break;
    case TL_INT16:  MAXREDUCE_LAUNCH(int16_t); break;
    case TL_INT8:   MAXREDUCE_LAUNCH(int8_t); break;
    case TL_UINT32: MAXREDUCE_LAUNCH(uint32_t); break;
    case TL_UINT16: MAXREDUCE_LAUNCH(uint16_t); break;
    case TL_UINT8:  MAXREDUCE_LAUNCH(uint8_t); break;
    case TL_BOOL:   MAXREDUCE_LAUNCH(tl_bool_t); break;
    default:
        assert(0 && "unsupported tl_dtype");
        break;
    }
#undef MAXREDUCE_LAUNCH
    tl_cuda_device_sync();
    return dst;
}
/* Element-wise product: dst[i] = src1[i] * src2[i]; one thread per element. */
template <typename T>
static __global__ void mul_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx < total)
        dst[idx] = src1[idx] * src2[idx];
}
/* Boolean element-wise product; any nonzero result is normalized to TL_TRUE. */
static __global__ void mul_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx >= total)
        return;
    int product = src1[idx] * src2[idx];
    dst[idx] = product ? TL_TRUE : TL_FALSE;
}
/* Element-wise quotient: dst[i] = src1[i] / src2[i]; one thread per element.
   NOTE(review): division by zero is not guarded (a device-side assert was
   left commented out in the original). */
template <typename T>
static __global__ void div_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx < total)
        dst[idx] = src1[idx] / src2[idx];
}
/* Boolean element-wise quotient; any nonzero result is normalized to TL_TRUE.
   NOTE(review): division by a TL_FALSE (zero) operand is not guarded. */
static __global__ void div_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx >= total)
        return;
    int quotient = src1[idx] / src2[idx];
    dst[idx] = quotient ? TL_TRUE : TL_FALSE;
}
/* Element-wise sum: dst[i] = src1[i] + src2[i]; one thread per element. */
template <typename T>
static __global__ void sum_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx < total)
        dst[idx] = src1[idx] + src2[idx];
}
/* Boolean element-wise sum; any nonzero result is normalized to TL_TRUE. */
static __global__ void sum_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx >= total)
        return;
    int total_val = src1[idx] + src2[idx];
    dst[idx] = total_val ? TL_TRUE : TL_FALSE;
}
/* Element-wise difference: dst[i] = src1[i] - src2[i]; one thread per element. */
template <typename T>
static __global__ void sub_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx < total)
        dst[idx] = src1[idx] - src2[idx];
}
/* Boolean element-wise difference; any nonzero result is normalized to TL_TRUE. */
static __global__ void sub_bool_kernel(tl_bool_t *src1, tl_bool_t *src2,
                                       tl_bool_t *dst, int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx >= total)
        return;
    int diff = src1[idx] - src2[idx];
    dst[idx] = diff ? TL_TRUE : TL_FALSE;
}
/* Element-wise maximum: dst[i] = max(src1[i], src2[i]); one thread per
   element. Uses the device max() overload for the instantiated type. */
template <typename T>
static __global__ void max_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
dst[di] = max(src1[di], src2[di]);
}
/* Element-wise minimum: dst[i] = min(src1[i], src2[i]); one thread per
   element. Uses the device min() overload for the instantiated type. */
template <typename T>
static __global__ void min_kernel(T *src1, T *src2, T *dst, int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
dst[di] = min(src1[di], src2[di]);
}
/* Element-wise power for integer types: the exponentiation is done in
   single-precision float, then the result is saturated to the integer
   type's [type_min, type_max] range before conversion back to T. */
template <typename T>
static __global__ void pow_int_kernel(T *src1, T *src2, T *dst, T type_max, T type_min,
                                      int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx >= total)
        return;
    float base = (float)src1[idx];
    float exponent = (float)src2[idx];
    float result = powf(base, exponent);
    if (result >= type_max)
        dst[idx] = type_max;
    else if (result <= type_min)
        dst[idx] = type_min;
    else
        dst[idx] = (T)result;
}
/* Element-wise power in double precision: dst[i] = pow(src1[i], src2[i]). */
static __global__ void pow_double_kernel(double *src1, double *src2, double *dst,
                                         int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx < total)
        dst[idx] = pow(src1[idx], src2[idx]);
}
/* Element-wise power in single precision: dst[i] = powf(src1[i], src2[i]). */
static __global__ void pow_float_kernel(float *src1, float *src2, float *dst,
                                        int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx < total)
        dst[idx] = powf(src1[idx], src2[idx]);
}
/* Boolean element-wise power: computed in float, then any nonzero result is
   normalized to TL_TRUE and zero to TL_FALSE. */
static __global__ void pow_bool_kernel(tl_bool_t *src1, tl_bool_t *src2, tl_bool_t *dst,
                                       int block_size, int total)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    if (idx >= total)
        return;
    float base = (float)src1[idx];
    float exponent = (float)src2[idx];
    float result = powf(base, exponent);
    dst[idx] = (result > 0 || result < 0) ? TL_TRUE : TL_FALSE;
}
/* Element-wise binary operation on two same-shaped device tensors.
   src1 and src2 must be device-resident with identical shape and dtype.
   If dst is NULL a new zeroed device tensor is allocated; otherwise dst
   must match src1's shape and dtype. Returns dst. Synchronizes the device
   before returning.
   Fixes vs. original: allocation used src2->dims instead of the reference
   operand src1's dims (same values, but inconsistent), and the assert
   message typo "unsopported" is corrected. The 8-dtype x 7-op copy-paste
   switch is condensed with local launch macros. */
tl_tensor *tl_tensor_elew_cuda(const tl_tensor *src1, const tl_tensor *src2,
                               tl_tensor *dst, tl_elew_op elew_op)
{
    assert(tl_tensor_issameshape(src1, src2));
    assert(tl_is_device_mem(src1->data) && tl_is_device_mem(src2->data));
    assert(src1->dtype == src2->dtype);
    if (dst) {
        assert(tl_is_device_mem(dst->data));
        assert(tl_tensor_issameshape(src1, dst));
        assert(src1->dtype == dst->dtype);
    } else {
        dst = tl_tensor_zeros_cuda(src1->ndim, src1->dims, src1->dtype);
    }
    int thread_num = dst->len;
    int block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);

    /* launch kernel K over the three buffers cast to element type T */
#define ELEW_LAUNCH(K, T)                                                    \
    K<<<block_num, BLOCK_SIZE>>>((T *)src1->data, (T *)src2->data,           \
                                 (T *)dst->data, BLOCK_SIZE, thread_num)
    /* the six arithmetic ops shared by all non-bool dtypes */
#define ELEW_COMMON_CASES(T)                                                 \
    case TL_MUL: ELEW_LAUNCH(mul_kernel<T>, T); break;                       \
    case TL_DIV: ELEW_LAUNCH(div_kernel<T>, T); break;                       \
    case TL_SUM: ELEW_LAUNCH(sum_kernel<T>, T); break;                       \
    case TL_SUB: ELEW_LAUNCH(sub_kernel<T>, T); break;                       \
    case TL_MAX: ELEW_LAUNCH(max_kernel<T>, T); break;                       \
    case TL_MIN: ELEW_LAUNCH(min_kernel<T>, T); break;
    /* integer pow computes in float and saturates to [TYPE_MIN, TYPE_MAX] */
#define ELEW_POW_INT(T, TYPE_MAX, TYPE_MIN)                                  \
    pow_int_kernel<T><<<block_num, BLOCK_SIZE>>>((T *)src1->data,            \
                                                 (T *)src2->data,            \
                                                 (T *)dst->data, TYPE_MAX,   \
                                                 TYPE_MIN, BLOCK_SIZE,       \
                                                 thread_num)
#define ELEW_BAD_OP default: assert(0 && "unsupported tl_elew_op"); break

    switch (src1->dtype) {
    case TL_DOUBLE:
        switch (elew_op) {
        ELEW_COMMON_CASES(double)
        case TL_POW: ELEW_LAUNCH(pow_double_kernel, double); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_FLOAT:
        switch (elew_op) {
        ELEW_COMMON_CASES(float)
        case TL_POW: ELEW_LAUNCH(pow_float_kernel, float); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_INT32:
        switch (elew_op) {
        ELEW_COMMON_CASES(int32_t)
        case TL_POW: ELEW_POW_INT(int32_t, INT32_MAX, INT32_MIN); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_INT16:
        switch (elew_op) {
        ELEW_COMMON_CASES(int16_t)
        case TL_POW: ELEW_POW_INT(int16_t, INT16_MAX, INT16_MIN); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_INT8:
        switch (elew_op) {
        ELEW_COMMON_CASES(int8_t)
        case TL_POW: ELEW_POW_INT(int8_t, INT8_MAX, INT8_MIN); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_UINT32:
        switch (elew_op) {
        ELEW_COMMON_CASES(uint32_t)
        case TL_POW: ELEW_POW_INT(uint32_t, UINT32_MAX, 0); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_UINT16:
        switch (elew_op) {
        ELEW_COMMON_CASES(uint16_t)
        case TL_POW: ELEW_POW_INT(uint16_t, UINT16_MAX, 0); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_UINT8:
        switch (elew_op) {
        ELEW_COMMON_CASES(uint8_t)
        case TL_POW: ELEW_POW_INT(uint8_t, UINT8_MAX, 0); break;
        ELEW_BAD_OP;
        }
        break;
    case TL_BOOL:
        /* bool uses the normalizing kernels for arithmetic ops and the
           generic max/min kernels, matching the original dispatch */
        switch (elew_op) {
        case TL_MUL: ELEW_LAUNCH(mul_bool_kernel, tl_bool_t); break;
        case TL_DIV: ELEW_LAUNCH(div_bool_kernel, tl_bool_t); break;
        case TL_SUM: ELEW_LAUNCH(sum_bool_kernel, tl_bool_t); break;
        case TL_SUB: ELEW_LAUNCH(sub_bool_kernel, tl_bool_t); break;
        case TL_MAX: ELEW_LAUNCH(max_kernel<tl_bool_t>, tl_bool_t); break;
        case TL_MIN: ELEW_LAUNCH(min_kernel<tl_bool_t>, tl_bool_t); break;
        case TL_POW: ELEW_LAUNCH(pow_bool_kernel, tl_bool_t); break;
        ELEW_BAD_OP;
        }
        break;
    default:
        assert(0 && "unsupported tl_dtype");
        break;
    }
#undef ELEW_BAD_OP
#undef ELEW_POW_INT
#undef ELEW_COMMON_CASES
#undef ELEW_LAUNCH
    tl_cuda_device_sync();
    return dst;
}
static __global__ void convert_kernel(void *src, void *dst,
tl_dtype dtype_s, tl_dtype dtype_d,
int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
if (di >= total)
return;
double val_d;
float val_f;
int32_t val_i32;
uint32_t val_u32;
int16_t val_i16;
uint16_t val_u16;
int8_t val_i8;
uint8_t val_u8;
switch (dtype_d) {
case TL_DOUBLE:
switch (dtype_s) {
case TL_DOUBLE:
((double *)dst)[di] = ((double *)src)[di];
break;
case TL_FLOAT:
((double *)dst)[di] = (double)((float *)src)[di];
break;
case TL_INT32:
((double *)dst)[di] = (double)((int32_t *)src)[di];
break;
case TL_INT16:
((double *)dst)[di] = (double)((int16_t *)src)[di];
break;
case TL_INT8:
((double *)dst)[di] = (double)((int8_t *)src)[di];
break;
case TL_UINT32:
((double *)dst)[di] = (double)((uint32_t *)src)[di];
break;
case TL_UINT16:
((double *)dst)[di] = (double)((uint16_t *)src)[di];
break;
case TL_UINT8:
((double *)dst)[di] = (double)((uint8_t *)src)[di];
break;
case TL_BOOL:
((double *)dst)[di] = (double)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_FLOAT:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= FLT_MAX)
((float *)dst)[di] = FLT_MAX;
else if (val_d <= -FLT_MAX)
((float *)dst)[di] = -FLT_MAX;
else
((float *)dst)[di] = (float)val_d;
break;
case TL_FLOAT:
((float *)dst)[di] = ((float *)src)[di];
break;
case TL_INT32:
((float *)dst)[di] = (float)((int32_t *)src)[di];
break;
case TL_INT16:
((float *)dst)[di] = (float)((int16_t *)src)[di];
break;
case TL_INT8:
((float *)dst)[di] = (float)((int8_t *)src)[di];
break;
case TL_UINT32:
((float *)dst)[di] = (float)((uint32_t *)src)[di];
break;
case TL_UINT16:
((float *)dst)[di] = (float)((uint16_t *)src)[di];
break;
case TL_UINT8:
((float *)dst)[di] = (float)((uint8_t *)src)[di];
break;
case TL_BOOL:
((float *)dst)[di] = (float)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= INT32_MAX)
((int32_t *)dst)[di] = INT32_MAX;
else if (val_d <= INT32_MIN)
((int32_t *)dst)[di] = INT32_MIN;
else
((int32_t *)dst)[di] = (int32_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= INT32_MAX)
((int32_t *)dst)[di] = INT32_MAX;
else if (val_f <= INT32_MIN)
((int32_t *)dst)[di] = INT32_MIN;
else
((int32_t *)dst)[di] = (int32_t)val_f;
break;
case TL_INT32:
((int32_t *)dst)[di] = ((int32_t *)src)[di];
break;
case TL_INT16:
((int32_t *)dst)[di] = (int32_t)((int16_t *)src)[di];
break;
case TL_INT8:
((int32_t *)dst)[di] = (int32_t)((int8_t *)src)[di];
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= INT32_MAX)
((int32_t *)dst)[di] = INT32_MAX;
else
((int32_t *)dst)[di] = (int32_t)val_u32;
break;
case TL_UINT16:
((int32_t *)dst)[di] = (int32_t)((uint16_t *)src)[di];
break;
case TL_UINT8:
((int32_t *)dst)[di] = (int32_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((int32_t *)dst)[di] = (int32_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else if (val_d <= INT16_MIN)
((int16_t *)dst)[di] = INT16_MIN;
else
((int16_t *)dst)[di] = (int16_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else if (val_f <= INT16_MIN)
((int16_t *)dst)[di] = INT16_MIN;
else
((int16_t *)dst)[di] = (int16_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else if (val_i32 <= INT16_MIN)
((int16_t *)dst)[di] = INT16_MIN;
else
((int16_t *)dst)[di] = (int16_t)val_i32;
break;
case TL_INT16:
((int16_t *)dst)[di] = ((int16_t *)src)[di];
break;
case TL_INT8:
((int16_t *)dst)[di] = (int16_t)((int8_t *)src)[di];
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else
((int16_t *)dst)[di] = (int16_t)val_u32;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16 >= INT16_MAX)
((int16_t *)dst)[di] = INT16_MAX;
else
((int16_t *)dst)[di] = (int16_t)val_u16;
break;
case TL_UINT8:
((int16_t *)dst)[di] = (int16_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((int16_t *)dst)[di] = (int16_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_INT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_d <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_f <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_i32 <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_i32;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else if (val_i16 <= INT8_MIN)
((int8_t *)dst)[di] = INT8_MIN;
else
((int8_t *)dst)[di] = (int8_t)val_i16;
break;
case TL_INT8:
((int8_t *)dst)[di] = ((int8_t *)src)[di];
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else
((int8_t *)dst)[di] = (int8_t)val_u32;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else
((int8_t *)dst)[di] = (int8_t)val_u16;
break;
case TL_UINT8:
val_u8 = ((uint8_t *)src)[di];
if (val_u8 >= INT8_MAX)
((int8_t *)dst)[di] = INT8_MAX;
else
((int8_t *)dst)[di] = (int8_t)val_u8;
break;
case TL_BOOL:
((int8_t *)dst)[di] = (int8_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT32:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= UINT32_MAX)
((uint32_t *)dst)[di] = UINT32_MAX;
else if (val_d < 0)
((uint32_t *)dst)[di] = 0;
else
((uint32_t *)dst)[di] = (uint32_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= UINT32_MAX)
((uint32_t *)dst)[di] = UINT32_MAX;
else if (val_f < 0)
((uint32_t *)dst)[di] = 0;
else
((uint32_t *)dst)[di] = (uint32_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= 0)
((uint32_t *)dst)[di] = (uint32_t)val_i32;
else
((uint32_t *)dst)[di] = 0;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= 0)
((uint32_t *)dst)[di] = (uint32_t)val_i16;
else
((uint32_t *)dst)[di] = 0;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8 >= 0)
((uint32_t *)dst)[di] = (uint32_t)val_i8;
else
((uint32_t *)dst)[di] = 0;
break;
case TL_UINT32:
((uint32_t *)dst)[di] = ((uint32_t *)src)[di];
break;
case TL_UINT16:
((uint32_t *)dst)[di] = (uint32_t)((uint16_t *)src)[di];
break;
case TL_UINT8:
((uint32_t *)dst)[di] = (uint32_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((uint32_t *)dst)[di] = (uint32_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT16:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else if (val_d < 0)
((uint16_t *)dst)[di] = 0;
else
((uint16_t *)dst)[di] = (uint16_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else if (val_f < 0)
((uint16_t *)dst)[di] = 0;
else
((uint16_t *)dst)[di] = (uint16_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else if (val_i32 < 0)
((uint16_t *)dst)[di] = 0;
else
((uint16_t *)dst)[di] = (uint16_t)val_i32;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= 0)
((uint16_t *)dst)[di] = (uint16_t)val_i16;
else
((uint16_t *)dst)[di] = 0;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8 >= 0)
((uint16_t *)dst)[di] = (uint16_t)val_i8;
else
((uint16_t *)dst)[di] = 0;
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= UINT16_MAX)
((uint16_t *)dst)[di] = UINT16_MAX;
else
((uint16_t *)dst)[di] = (uint16_t)val_u32;
break;
case TL_UINT16:
((uint16_t *)dst)[di] = ((uint16_t *)src)[di];
break;
case TL_UINT8:
((uint16_t *)dst)[di] = (uint16_t)((uint8_t *)src)[di];
break;
case TL_BOOL:
((uint16_t *)dst)[di] = (uint16_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_UINT8:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_d < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_d;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_f < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_f;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_i32 < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_i32;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else if (val_i16 < 0)
((uint8_t *)dst)[di] = 0;
else
((uint8_t *)dst)[di] = (uint8_t)val_i16;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8 >= 0)
((uint8_t *)dst)[di] = (uint8_t)val_i8;
else
((uint8_t *)dst)[di] = 0;
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else
((uint8_t *)dst)[di] = (uint8_t)val_u32;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16 >= UINT8_MAX)
((uint8_t *)dst)[di] = UINT8_MAX;
else
((uint8_t *)dst)[di] = (uint8_t)val_u16;
break;
case TL_UINT8:
((uint8_t *)dst)[di] = ((uint8_t *)src)[di];
break;
case TL_BOOL:
((uint8_t *)dst)[di] = (uint8_t)((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_BOOL:
switch (dtype_s) {
case TL_DOUBLE:
val_d = ((double *)src)[di];
if (val_d > 0 || val_d < 0)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_FLOAT:
val_f = ((float *)src)[di];
if (val_f > 0 || val_f < 0)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_INT32:
val_i32 = ((int32_t *)src)[di];
if (val_i32)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_INT16:
val_i16 = ((int16_t *)src)[di];
if (val_i16)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_INT8:
val_i8 = ((int8_t *)src)[di];
if (val_i8)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_UINT32:
val_u32 = ((uint32_t *)src)[di];
if (val_u32)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_UINT16:
val_u16 = ((uint16_t *)src)[di];
if (val_u16)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_UINT8:
val_u8 = ((uint8_t *)src)[di];
if (val_u8)
((tl_bool_t *)dst)[di] = TL_TRUE;
else
((tl_bool_t *)dst)[di] = TL_FALSE;
break;
case TL_BOOL:
((tl_bool_t *)dst)[di] = ((tl_bool_t *)src)[di];
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
}
/* Convert a device tensor to another dtype.
 * src must live in device memory.  If dst is non-NULL it must be a
 * device tensor of the same shape with dtype == dtype_d; otherwise a
 * new zeroed device tensor is allocated.  Returns the destination. */
tl_tensor *tl_tensor_convert_cuda(const tl_tensor *src, tl_tensor *dst,
tl_dtype dtype_d)
{
assert(src && tl_is_device_mem(src->data));
if (!dst) {
dst = tl_tensor_zeros_cuda(src->ndim, src->dims, dtype_d);
} else {
assert(tl_is_device_mem(dst->data));
assert(tl_tensor_issameshape(src, dst));
assert(dst->dtype == dtype_d);
}
const tl_dtype src_dtype = src->dtype;
const int n = dst->len;                       /* one thread per element */
const int grid = BLOCK_NUM(BLOCK_SIZE, n);
convert_kernel<<<grid, BLOCK_SIZE>>>(src->data, dst->data,
                                     src_dtype, dtype_d,
                                     BLOCK_SIZE, n);
tl_cuda_device_sync();
return dst;
}
/* Permute-copy kernel: each thread computes one destination element by
 * mapping its flat index back to the corresponding source location
 * under the axes permutation (dst dim i <- src dim axes[i]). */
template <typename T>
static __global__ void transpose_kernel(T *src, T *dst, int ndim,
int *s_dims, int *d_dims,
int *axes, int block_size, int total)
{
const int dst_idx = blockIdx.x * block_size + threadIdx.x;
if (dst_idx < total) {
int src_coords[TL_MAXDIM];
int dst_coords[TL_MAXDIM];
/* decompose the flat destination index into per-dimension coords */
get_coords(dst_idx, dst_coords, ndim, d_dims);
for (int d = 0; d < ndim; d++)
src_coords[axes[d]] = dst_coords[d];
dst[dst_idx] = src[get_index(src_coords, ndim, s_dims)];
}
}
/* Transpose a device tensor according to the permutation axes, where
 * axes[i] names the source dimension that becomes dst dimension i.
 * If dst is NULL a new zeroed device tensor with the permuted shape is
 * allocated.  Returns the destination tensor.
 *
 * Fixes vs. previous version:
 *  - the dst-shape check used `=` (assignment) instead of `==`, so it
 *    always passed AND overwrote src->dims in debug builds;
 *  - src was dereferenced before the assert that validates it. */
tl_tensor *tl_tensor_transpose_cuda(const tl_tensor *src, tl_tensor *dst,
const int *axes)
{
int i;
#ifndef NDEBUG
/* validate src before any dereference */
assert(src && tl_is_device_mem(src->data));
int tmp[TL_MAXDIM] = {0};
for (i = 0; i < src->ndim; i++)
tmp[axes[i]] = 1;
for (i = 0; i < src->ndim; i++)
assert(tmp[i] && "axes don't match src tensor's shape");
#endif
if (dst) {
#ifndef NDEBUG
assert(tl_is_device_mem(dst->data));
assert(src->dtype == dst->dtype);
assert(src->len == dst->len);
assert(src->ndim == dst->ndim);
for (i = 0; i < dst->ndim; i++)
assert(src->dims[axes[i]] == dst->dims[i]);  /* was '=': always true */
#endif
} else {
int d_dims[TL_MAXDIM];
for (i = 0; i < src->ndim; i++)
d_dims[i] = src->dims[axes[i]];
dst = tl_tensor_zeros_cuda(src->ndim, d_dims, src->dtype);
}
/* copy shape/permutation metadata to the device for the kernel */
int *axes_device;
int *s_dims, *d_dims;
int thread_num, block_num;
thread_num = dst->len;
block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
s_dims = (int *)tl_clone_h2d(src->dims, sizeof(int) * src->ndim);
d_dims = (int *)tl_clone_h2d(dst->dims, sizeof(int) * dst->ndim);
axes_device = (int *)tl_clone_h2d(axes, sizeof(int) * src->ndim);
/* dispatch on element type; kernel body is type-agnostic */
switch (src->dtype) {
case TL_DOUBLE:
transpose_kernel<double><<<block_num, BLOCK_SIZE>>>((double *)src->data,
(double *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_FLOAT:
transpose_kernel<float><<<block_num, BLOCK_SIZE>>>((float *)src->data,
(float *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_INT32:
transpose_kernel<int32_t><<<block_num, BLOCK_SIZE>>>((int32_t *)src->data,
(int32_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_INT16:
transpose_kernel<int16_t><<<block_num, BLOCK_SIZE>>>((int16_t *)src->data,
(int16_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_INT8:
transpose_kernel<int8_t><<<block_num, BLOCK_SIZE>>>((int8_t *)src->data,
(int8_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_UINT32:
transpose_kernel<uint32_t><<<block_num, BLOCK_SIZE>>>((uint32_t *)src->data,
(uint32_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_UINT16:
transpose_kernel<uint16_t><<<block_num, BLOCK_SIZE>>>((uint16_t *)src->data,
(uint16_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_UINT8:
transpose_kernel<uint8_t><<<block_num, BLOCK_SIZE>>>((uint8_t *)src->data,
(uint8_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
case TL_BOOL:
transpose_kernel<tl_bool_t><<<block_num, BLOCK_SIZE>>>((tl_bool_t *)src->data,
(tl_bool_t *)dst->data,
dst->ndim,
s_dims, d_dims,
axes_device,
BLOCK_SIZE,
thread_num);
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
tl_cuda_device_sync();
tl_free_cuda(s_dims);
tl_free_cuda(d_dims);
tl_free_cuda(axes_device);
return dst;
}
/* Nearest-neighbor resize kernel (half-pixel-center mapping).
 * One thread per destination element; scale factors are staged in
 * shared memory once per block.
 *
 * Fixes vs. previous version:
 *  - the scales[] guard used the GLOBAL index di, so only block 0 ever
 *    initialized its shared copy; all other blocks read uninitialized
 *    shared memory.  Now every block fills its own copy via threadIdx.x
 *    (requires blockDim.x >= ndim, which holds since ndim <= TL_MAXDIM).
 *  - no __syncthreads() separated the shared write from the reads;
 *  - bounds check was `di > total` (off by one: thread di == total
 *    wrote one element past the end). */
template <typename T>
static __global__ void nearest_resize_kernel(const T *src, T *dst, int ndim,
const int *dims, const int *new_dims,
int block_size, int total)
{
int di = blockIdx.x * block_size + threadIdx.x;
__shared__ float scales[TL_MAXDIM];
if (threadIdx.x < ndim)
scales[threadIdx.x] = (float)dims[threadIdx.x] / (float)new_dims[threadIdx.x];
/* barrier must be reached by ALL threads, so it precedes the early out */
__syncthreads();
if (di >= total)
return;
int si;
float rounded;
int src_coords[TL_MAXDIM];
int dst_coords[TL_MAXDIM];
get_coords(di, dst_coords, ndim, new_dims);
for (int i = 0; i < ndim; i++) {
/* map dst pixel center back into src coordinates, then round */
rounded = roundf(((float)dst_coords[i] + 0.5f) * scales[i] - 0.5f);
convert_device(&src_coords[i], TL_INT32, &rounded, TL_FLOAT);
}
si = get_index(src_coords, ndim, dims);
dst[di] = src[si];
}
/* Resize a device tensor to new_dims using the given resize type.
 * Only TL_NEAREST is implemented.  If dst is NULL a new zeroed device
 * tensor of shape new_dims is allocated.  Returns the destination.
 *
 * Fix vs. previous version: the TL_UINT16 case was missing its break
 * and fell through into TL_UINT8, launching a second kernel that
 * reinterpreted the buffers as uint8 and corrupted the output. */
tl_tensor *tl_tensor_resize_cuda(const tl_tensor *src, tl_tensor *dst,
const int *new_dims, tl_resize_type rtype)
{
assert(src && src->data);
assert(new_dims);
tl_check_resize_type(rtype);
if (dst) {
assert(dst->data);
assert(dst->dtype == src->dtype);
assert(dst->ndim == src->ndim);
} else {
dst = tl_tensor_zeros_cuda(src->ndim, new_dims, src->dtype);
}
int block_num, thread_num;
int *dims_cuda, *new_dims_cuda;
/* shape arrays must live on the device for the kernel */
dims_cuda = (int *)tl_clone_h2d(src->dims, sizeof(int)*src->ndim);
new_dims_cuda = (int *)tl_clone_h2d(new_dims, sizeof(int)*src->ndim);
thread_num = dst->len;
block_num = BLOCK_NUM(BLOCK_SIZE, thread_num);
switch (rtype) {
case TL_NEAREST:
switch (src->dtype) {
case TL_DOUBLE:
nearest_resize_kernel<double><<<block_num, BLOCK_SIZE>>>((double*)src->data, (double*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_FLOAT:
nearest_resize_kernel<float><<<block_num, BLOCK_SIZE>>>((float*)src->data, (float*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_INT32:
nearest_resize_kernel<int32_t><<<block_num, BLOCK_SIZE>>>((int32_t*)src->data, (int32_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_INT16:
nearest_resize_kernel<int16_t><<<block_num, BLOCK_SIZE>>>((int16_t*)src->data, (int16_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_INT8:
nearest_resize_kernel<int8_t><<<block_num, BLOCK_SIZE>>>((int8_t*)src->data, (int8_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_UINT32:
nearest_resize_kernel<uint32_t><<<block_num, BLOCK_SIZE>>>((uint32_t*)src->data, (uint32_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_UINT16:
nearest_resize_kernel<uint16_t><<<block_num, BLOCK_SIZE>>>((uint16_t*)src->data, (uint16_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;  /* was missing: fell through into the uint8 launch */
case TL_UINT8:
nearest_resize_kernel<uint8_t><<<block_num, BLOCK_SIZE>>>((uint8_t*)src->data, (uint8_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
case TL_BOOL:
nearest_resize_kernel<tl_bool_t><<<block_num, BLOCK_SIZE>>>((tl_bool_t*)src->data, (tl_bool_t*)dst->data, src->ndim, dims_cuda, new_dims_cuda, BLOCK_SIZE, thread_num);
break;
default:
assert(0 && "unsupported tl_dtype");
break;
}
break;
case TL_LINEAR:
assert(0 && "not support TL_LINEAR yet");
break;
default:
assert(0 && "unsupported tl_resize_type");
break;
}
tl_cuda_device_sync();
tl_free_cuda(dims_cuda);
tl_free_cuda(new_dims_cuda);
return dst;
}
|
2d2b48927aea9ebdb212f42d4b30063745162b30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaBMTKernel_MultiDim.cuh"
#include <iostream>
#include <stdint.h>
#include <stdio.h>
//#define _DEBUG
using namespace std;
PRECISION * fa_d, * fb_d, * fc_d,
* fp_d, * fwrk1_d, * fwrk2_d, * fbnd_d,
* gosa_d,
* gosa_h;
int mimax, mjmax, mkmax;
typedef void * PtrObj;
/* One Jacobi SOR sweep over the interior of the 3-D pressure grid
 * (himeno-style benchmark).  Arrays are flattened 3-D/4-D buffers with
 * allocated extents mimax x mjmax x mkmax; a has 4 components and b, c
 * have 3, stored as consecutive mimax*mjmax*mkmax planes (om0..om3).
 * Each thread covers a strided subset of (i, j, k); halo cells (index
 * 0 and max-1) are skipped.  The squared residual of every cell is
 * accumulated into *gosa via atomicAdd, and the relaxed value is
 * written to wrk2 (p is updated later by bmtUpdatePressureKernel).
 * NOTE(review): omega = 0.8 is a double literal; if PRECISION is float
 * this promotes the multiply to double — presumably intentional for
 * accuracy, but worth confirming. */
__global__ void bmtJacobiKernel(
PRECISION * a, PRECISION * b, PRECISION * c,
PRECISION * p, PRECISION * wrk1, PRECISION * wrk2,
PRECISION * bnd, PRECISION * gosa,
int imax, int jmax, int kmax,
int mimax, int mjmax, int mkmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
PRECISION s0, ss, omega = 0.8;
// __shared__ PRECISION wgosa;
/* global thread coordinates: x/y come from the 2-D grid, z only from
 * the block (gridDim.z is not used by the launch configuration) */
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
// int tid = (threadIdx.z * (blockDim.y * blockDim.x)) +
// (threadIdx.y * blockDim.x) +
// threadIdx.x;
// if (tid == 0)
// wgosa = 0.0;
// __syncthreads();
/* number of strided passes each thread makes per axis */
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
/* plane offsets selecting component 0..3 of the coefficient arrays */
int om0 = 0;
int om1 = 1 * mimax * mjmax * mkmax;
int om2 = 2 * mimax * mjmax * mkmax;
int om3 = 3 * mimax * mjmax * mkmax;
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
/* row offsets for i, i+1, i-1 */
int oi = i * mjmax * mkmax;
int oi1 = (i + 1) * mjmax * mkmax;
int oin = (i - 1) * mjmax * mkmax;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
int oj = j * mkmax;
int oj1 = (j + 1) * mkmax;
int ojn = (j - 1) * mkmax;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
int ok = k;
int ok1 = k + 1;
int okn = k - 1;
int temp = oi+oj+ok;
/* 19-point stencil of the Jacobi relaxation */
s0 = a[om0+temp] * p[oi1 + oj + ok]
+ a[om1+temp] * p[oi + oj1 + ok]
+ a[om2+temp] * p[oi + oj + ok1]
+ b[om0+temp] * ( p[oi1 + oj1 + ok ] - p[oi1 + ojn + ok ]
- p[oin + oj1 + ok ] + p[oin + ojn + ok ] )
+ b[om1+temp] * ( p[oi + oj1 + ok1] - p[oi + ojn + ok1]
- p[oi + oj1 + okn] + p[oi + ojn + okn] )
+ b[om2+temp] * ( p[oi1 + oj + ok1] - p[oin + oj + ok1]
- p[oi1 + oj + okn] + p[oin + oj + okn] )
+ c[om0+temp] * p[oin + oj + ok ]
+ c[om1+temp] * p[oi + ojn + ok ]
+ c[om2+temp] * p[oi + oj + okn]
+ wrk1[temp];
ss = ( s0 * a[om3+temp] - p[temp] ) * bnd[temp];
/* residual accumulation; contended atomic on a single address */
atomicAdd(gosa, ss*ss);
wrk2[temp] = p[temp] + omega * ss;
}
}
}
// __syncthreads();
/*
for (i=1;i<imax-1;++i) {
for (j=1;j<jmax-1;++j) {
for (k=1;k<kmax-1;++k) {
*/
#if 0
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
#endif
#if 0
if (tid == 0) {
printf("gosa: %f\n", wgosa);
atomicAdd(gosa, wgosa);
}
#endif
}
/* Copy the relaxed interior values back into the pressure array:
 * p[i][j][k] = wrk2[i][j][k] for 1 <= i,j,k <= max-2.  Threads walk a
 * strided subset of the index space; halo cells are left untouched. */
__global__ void bmtUpdatePressureKernel(
PRECISION * p, PRECISION * wrk2,
int imax, int jmax, int kmax,
int mimax, int mjmax, int mkmax) {
const int threadsX = gridDim.x * blockDim.x;
const int threadsY = gridDim.y * blockDim.y;
const int threadsZ = blockDim.z;
const int gx = blockIdx.x * blockDim.x + threadIdx.x;
const int gy = blockIdx.y * blockDim.y + threadIdx.y;
const int gz = threadIdx.z;
/* strided passes per axis so any launch shape covers the whole grid */
const int stridesI = imax / threadsX + 1;
const int stridesJ = jmax / threadsY + 1;
const int stridesK = kmax / threadsZ + 1;
for (int si = 0; si < stridesI; si++) {
const int i = si * threadsX + gx;
if (i < 1 || i > imax - 2)
continue;
const int rowI = i * mjmax * mkmax;
for (int sj = 0; sj < stridesJ; sj++) {
const int j = sj * threadsY + gy;
if (j < 1 || j > jmax - 2)
continue;
const int rowJ = j * mkmax;
for (int sk = 0; sk < stridesK; sk++) {
const int k = sk * threadsZ + gz;
if (k < 1 || k > kmax - 2)
continue;
const int idx = rowI + rowJ + k;
p[idx] = wrk2[idx];
}
}
}
}
/* Evaluate a HIP runtime call and, on failure, return its error code
 * from the enclosing function so the caller sees the first bad status. */
#define CHK_ERR(str) \
do { \
hipError_t ce = str; \
if (ce != hipSuccess) \
return ce; \
} while (0)
/* Select a device (round-robin by peid) and allocate + upload all
 * benchmark arrays into the module-level device pointers (fa_d..fbnd_d,
 * gosa_d) and the host-side result cell gosa_h.  Extents come from the
 * global `config` (declared elsewhere).  Returns the first failing HIP
 * status, or hipSuccess.
 * NOTE(review): repeated calls leak the previous gosa_h / device
 * allocations — presumably this is only called once per process;
 * confirm with callers. */
hipError_t bmtInitDeviceMemory(
Matrix * pa, Matrix * pb, Matrix * pc,
Matrix * pp, Matrix * pwrk1, Matrix * pwrk2,
Matrix * pbnd, int peid) {
int devCnt = 0;
CHK_ERR( hipGetDeviceCount(&devCnt));
CHK_ERR( hipSetDevice(peid % devCnt));
gosa_h = new PRECISION();
CHK_ERR( hipMalloc(&gosa_d, sizeof(PRECISION)));
/* bytes of one mimax x mjmax x mkmax volume */
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
/* cache extents in module globals for later kernel launches */
mimax = config.mimax;
mjmax = config.mjmax;
mkmax = config.mkmax;
/* a has 4 components, b and c have 3 (see bmtJacobiKernel) */
CHK_ERR( hipMalloc(&fa_d, 4 * memreq_3d));
CHK_ERR( hipMalloc(&fb_d, 3 * memreq_3d));
CHK_ERR( hipMalloc(&fc_d, 3 * memreq_3d));
CHK_ERR( hipMalloc(&fp_d, memreq_3d));
CHK_ERR( hipMalloc(&fwrk1_d, memreq_3d));
CHK_ERR( hipMalloc(&fwrk2_d, memreq_3d));
CHK_ERR( hipMalloc(&fbnd_d, memreq_3d));
CHK_ERR( hipMemcpy(fa_d, pa->mpVal,
4 * memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fb_d, pb->mpVal,
3 * memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fc_d, pc->mpVal,
3 * memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fp_d, pp->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fwrk1_d, pwrk1->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fwrk2_d, pwrk2->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fbnd_d, pbnd->mpVal,
memreq_3d, hipMemcpyHostToDevice));
return hipSuccess;
}
/* Run one Jacobi iteration on the device: zero the residual cell,
 * launch the relaxation kernel, then the pressure-update kernel, and
 * copy the residual (*gosa) and the updated pressure field back to the
 * host.  Grid is num_blocks x num_blocks, block is 1 x 1 x num_threads.
 * Returns the first failing HIP status, or hipSuccess. */
hipError_t bmtCudaJacobi(PRECISION * gosa, Matrix * pp,
int imax, int jmax, int kmax, int num_blocks, int num_threads) {
dim3 grid(num_blocks, num_blocks, 1); // (16,16,1)
dim3 block(1, 1, num_threads); // (1,1,64)
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
//Jacobi
CHK_ERR( hipMemset(gosa_d, 0, sizeof(PRECISION)));
hipLaunchKernelGGL(( bmtJacobiKernel) , dim3(grid), dim3(block), 0, 0,
fa_d, fb_d, fc_d, fp_d,
fwrk1_d, fwrk2_d, fbnd_d, gosa_d,
imax, jmax, kmax,
mimax, mjmax, mkmax);
CHK_ERR( hipDeviceSynchronize());
//Update Pressure Matrix
hipLaunchKernelGGL(( bmtUpdatePressureKernel) , dim3(grid), dim3(block), 0, 0,
fp_d, fwrk2_d,
imax, jmax, kmax,
mimax, mjmax, mkmax);
CHK_ERR( hipDeviceSynchronize());
/* bring the scalar residual and the full pressure volume back */
CHK_ERR( hipMemcpy(gosa_h, gosa_d,
sizeof(PRECISION), hipMemcpyDeviceToHost));
CHK_ERR( hipMemcpy(pp->mpVal, fp_d,
memreq_3d, hipMemcpyDeviceToHost));
*gosa = *gosa_h;
return hipSuccess;
}
| 2d2b48927aea9ebdb212f42d4b30063745162b30.cu | #include "cudaBMTKernel_MultiDim.cuh"
#include <iostream>
#include <stdint.h>
#include <stdio.h>
//#define _DEBUG
using namespace std;
PRECISION * fa_d, * fb_d, * fc_d,
* fp_d, * fwrk1_d, * fwrk2_d, * fbnd_d,
* gosa_d,
* gosa_h;
int mimax, mjmax, mkmax;
typedef void * PtrObj;
/* One Jacobi SOR sweep over the interior of the 3-D pressure grid
 * (himeno-style benchmark).  Arrays are flattened 3-D/4-D buffers with
 * allocated extents mimax x mjmax x mkmax; a has 4 components and b, c
 * have 3, stored as consecutive mimax*mjmax*mkmax planes (om0..om3).
 * Each thread covers a strided subset of (i, j, k); halo cells (index
 * 0 and max-1) are skipped.  The squared residual of every cell is
 * accumulated into *gosa via atomicAdd, and the relaxed value is
 * written to wrk2 (p is updated later by bmtUpdatePressureKernel).
 * NOTE(review): omega = 0.8 is a double literal; if PRECISION is float
 * this promotes the multiply to double — presumably intentional for
 * accuracy, but worth confirming. */
__global__ void bmtJacobiKernel(
PRECISION * a, PRECISION * b, PRECISION * c,
PRECISION * p, PRECISION * wrk1, PRECISION * wrk2,
PRECISION * bnd, PRECISION * gosa,
int imax, int jmax, int kmax,
int mimax, int mjmax, int mkmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
PRECISION s0, ss, omega = 0.8;
// __shared__ PRECISION wgosa;
/* global thread coordinates: x/y come from the 2-D grid, z only from
 * the block (gridDim.z is not used by the launch configuration) */
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
// int tid = (threadIdx.z * (blockDim.y * blockDim.x)) +
// (threadIdx.y * blockDim.x) +
// threadIdx.x;
// if (tid == 0)
// wgosa = 0.0;
// __syncthreads();
/* number of strided passes each thread makes per axis */
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
/* plane offsets selecting component 0..3 of the coefficient arrays */
int om0 = 0;
int om1 = 1 * mimax * mjmax * mkmax;
int om2 = 2 * mimax * mjmax * mkmax;
int om3 = 3 * mimax * mjmax * mkmax;
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
/* row offsets for i, i+1, i-1 */
int oi = i * mjmax * mkmax;
int oi1 = (i + 1) * mjmax * mkmax;
int oin = (i - 1) * mjmax * mkmax;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
int oj = j * mkmax;
int oj1 = (j + 1) * mkmax;
int ojn = (j - 1) * mkmax;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
int ok = k;
int ok1 = k + 1;
int okn = k - 1;
int temp = oi+oj+ok;
/* 19-point stencil of the Jacobi relaxation */
s0 = a[om0+temp] * p[oi1 + oj + ok]
+ a[om1+temp] * p[oi + oj1 + ok]
+ a[om2+temp] * p[oi + oj + ok1]
+ b[om0+temp] * ( p[oi1 + oj1 + ok ] - p[oi1 + ojn + ok ]
- p[oin + oj1 + ok ] + p[oin + ojn + ok ] )
+ b[om1+temp] * ( p[oi + oj1 + ok1] - p[oi + ojn + ok1]
- p[oi + oj1 + okn] + p[oi + ojn + okn] )
+ b[om2+temp] * ( p[oi1 + oj + ok1] - p[oin + oj + ok1]
- p[oi1 + oj + okn] + p[oin + oj + okn] )
+ c[om0+temp] * p[oin + oj + ok ]
+ c[om1+temp] * p[oi + ojn + ok ]
+ c[om2+temp] * p[oi + oj + okn]
+ wrk1[temp];
ss = ( s0 * a[om3+temp] - p[temp] ) * bnd[temp];
/* residual accumulation; contended atomic on a single address */
atomicAdd(gosa, ss*ss);
wrk2[temp] = p[temp] + omega * ss;
}
}
}
// __syncthreads();
/*
for (i=1;i<imax-1;++i) {
for (j=1;j<jmax-1;++j) {
for (k=1;k<kmax-1;++k) {
*/
#if 0
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
#endif
#if 0
if (tid == 0) {
printf("gosa: %f\n", wgosa);
atomicAdd(gosa, wgosa);
}
#endif
}
/* Copy the relaxed interior values back into the pressure array:
 * p[i][j][k] = wrk2[i][j][k] for 1 <= i,j,k <= max-2.  Threads walk a
 * strided subset of the index space; halo cells are left untouched. */
__global__ void bmtUpdatePressureKernel(
PRECISION * p, PRECISION * wrk2,
int imax, int jmax, int kmax,
int mimax, int mjmax, int mkmax) {
const int threadsX = gridDim.x * blockDim.x;
const int threadsY = gridDim.y * blockDim.y;
const int threadsZ = blockDim.z;
const int gx = blockIdx.x * blockDim.x + threadIdx.x;
const int gy = blockIdx.y * blockDim.y + threadIdx.y;
const int gz = threadIdx.z;
/* strided passes per axis so any launch shape covers the whole grid */
const int stridesI = imax / threadsX + 1;
const int stridesJ = jmax / threadsY + 1;
const int stridesK = kmax / threadsZ + 1;
for (int si = 0; si < stridesI; si++) {
const int i = si * threadsX + gx;
if (i < 1 || i > imax - 2)
continue;
const int rowI = i * mjmax * mkmax;
for (int sj = 0; sj < stridesJ; sj++) {
const int j = sj * threadsY + gy;
if (j < 1 || j > jmax - 2)
continue;
const int rowJ = j * mkmax;
for (int sk = 0; sk < stridesK; sk++) {
const int k = sk * threadsZ + gz;
if (k < 1 || k > kmax - 2)
continue;
const int idx = rowI + rowJ + k;
p[idx] = wrk2[idx];
}
}
}
}
/* Evaluate a CUDA runtime call and, on failure, return its error code
 * from the enclosing function so the caller sees the first bad status. */
#define CHK_ERR(str) \
do { \
cudaError_t ce = str; \
if (ce != cudaSuccess) \
return ce; \
} while (0)
/* Select a device (round-robin by peid) and allocate + upload all
 * benchmark arrays into the module-level device pointers (fa_d..fbnd_d,
 * gosa_d) and the host-side result cell gosa_h.  Extents come from the
 * global `config` (declared elsewhere).  Returns the first failing CUDA
 * status, or cudaSuccess.
 * NOTE(review): repeated calls leak the previous gosa_h / device
 * allocations — presumably this is only called once per process;
 * confirm with callers. */
cudaError_t bmtInitDeviceMemory(
Matrix * pa, Matrix * pb, Matrix * pc,
Matrix * pp, Matrix * pwrk1, Matrix * pwrk2,
Matrix * pbnd, int peid) {
int devCnt = 0;
CHK_ERR( cudaGetDeviceCount(&devCnt));
CHK_ERR( cudaSetDevice(peid % devCnt));
gosa_h = new PRECISION();
CHK_ERR( cudaMalloc(&gosa_d, sizeof(PRECISION)));
/* bytes of one mimax x mjmax x mkmax volume */
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
/* cache extents in module globals for later kernel launches */
mimax = config.mimax;
mjmax = config.mjmax;
mkmax = config.mkmax;
/* a has 4 components, b and c have 3 (see bmtJacobiKernel) */
CHK_ERR( cudaMalloc(&fa_d, 4 * memreq_3d));
CHK_ERR( cudaMalloc(&fb_d, 3 * memreq_3d));
CHK_ERR( cudaMalloc(&fc_d, 3 * memreq_3d));
CHK_ERR( cudaMalloc(&fp_d, memreq_3d));
CHK_ERR( cudaMalloc(&fwrk1_d, memreq_3d));
CHK_ERR( cudaMalloc(&fwrk2_d, memreq_3d));
CHK_ERR( cudaMalloc(&fbnd_d, memreq_3d));
CHK_ERR( cudaMemcpy(fa_d, pa->mpVal,
4 * memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fb_d, pb->mpVal,
3 * memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fc_d, pc->mpVal,
3 * memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fp_d, pp->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fwrk1_d, pwrk1->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fwrk2_d, pwrk2->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fbnd_d, pbnd->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
return cudaSuccess;
}
/* Run one Jacobi iteration on the device: zero the residual cell,
 * launch the relaxation kernel, then the pressure-update kernel, and
 * copy the residual (*gosa) and the updated pressure field back to the
 * host.  Grid is num_blocks x num_blocks, block is 1 x 1 x num_threads.
 * Returns the first failing CUDA status, or cudaSuccess. */
cudaError_t bmtCudaJacobi(PRECISION * gosa, Matrix * pp,
int imax, int jmax, int kmax, int num_blocks, int num_threads) {
dim3 grid(num_blocks, num_blocks, 1); // (16,16,1)
dim3 block(1, 1, num_threads); // (1,1,64)
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
//Jacobi
CHK_ERR( cudaMemset(gosa_d, 0, sizeof(PRECISION)));
bmtJacobiKernel <<<grid, block>>> (
fa_d, fb_d, fc_d, fp_d,
fwrk1_d, fwrk2_d, fbnd_d, gosa_d,
imax, jmax, kmax,
mimax, mjmax, mkmax);
CHK_ERR( cudaDeviceSynchronize());
//Update Pressure Matrix
bmtUpdatePressureKernel <<<grid, block>>> (
fp_d, fwrk2_d,
imax, jmax, kmax,
mimax, mjmax, mkmax);
CHK_ERR( cudaDeviceSynchronize());
/* bring the scalar residual and the full pressure volume back */
CHK_ERR( cudaMemcpy(gosa_h, gosa_d,
sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(pp->mpVal, fp_d,
memreq_3d, cudaMemcpyDeviceToHost));
*gosa = *gosa_h;
return cudaSuccess;
}
|
a1d331d1c36531bbcbbf6e4ca0afb25b417c9ffd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/penlu_layer.hpp"
namespace caffe {
// CUDA kernel for the PENLU forward pass:
//   y = x^eta            for x > 0
//   y = alpha*(e^(beta*x) - 1)  for x <= 0
// Parameters alpha/beta/eta are per-channel; with channel_shared_ the
// caller passes div_factor == channels so index 0 is used for all.
template <typename Dtype>
__global__ void PENLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const int div_factor, const Dtype* alpha, const Dtype* beta , const Dtype* eta) {
CUDA_KERNEL_LOOP(index, n) {
// channel of this element (collapses to 0 when parameters are shared)
int c = (index / dim) % channels / div_factor;
// out[index] = in[index] : ( ( pow( (::max(in[index], Dtype(0)),eta[c] ) ) * ( in[index] > 0 ) + ( ( exp( beta[c]*::min(in[index], Dtype(0)) ) - 1 ) * alpha[c] ) * (in[index] <= 0) );
if (in[index] > 0){
out[index] = (pow(in[index],eta[c]));
}
else{
out[index] = (( exp(beta[c]*in[index] ) - 1 ) * alpha[c]);
}
}
}
// CUDA kernel for bottom backward
// template <typename Dtype>
// __global__ void M2PELUBackward(const int n, const int channels, const int dim,
// const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
// const int div_factor,
// const Dtype* top_data, const Dtype* alpha, const Dtype* beta, const Dtype* gamma) {
// CUDA_KERNEL_LOOP(index, n) {
// int c = (index / dim) % channels / div_factor;
// out_diff[index] = in_diff[index] * ((in_data[index] > 0)
// + (in_data[index] <= 0) * beta[c] * ( top_data[index] + alpha[c]*exp(gamma[c]) ) );
// }
// }
// CUDA kernel for element-wise parameter backward.
// Forward was: y = x^eta (x > 0), y = alpha*(exp(beta*x)-1) (x <= 0).
// For each of the `n` channel*dim positions this accumulates the
// per-parameter gradients over the `rows` batch images (stride
// rowPitch) into out_diff_{alpha,beta,eta}, and writes the bottom
// gradient into out_diff.
//
// Fix vs. previous version: log(x) and top/x were evaluated for EVERY
// element and only masked by multiplying with (x > 0).  For x <= 0,
// log(x) is NaN and NaN * 0 == NaN, so eta gradients and bottom diffs
// were poisoned whenever any input was non-positive (likewise 0/0 for
// x == 0).  The branches below evaluate each formula only on its
// valid domain; results are bitwise-identical on all-positive input.
template <typename Dtype>
__global__ void PENLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff_alpha, Dtype* out_diff_beta, Dtype* out_diff_eta,
const int channels, const int dim, const int div_factor, const Dtype* top_data, Dtype* out_diff,
const Dtype* alpha, const Dtype* beta, const Dtype* eta ) {
CUDA_KERNEL_LOOP(index, n) {
const int c = (index / dim) % channels / div_factor;
Dtype a_sum = 0, b_sum = 0, e_sum = 0;
for (int k = 0; k < rows; k++) {
const int idx = index + k * rowPitch;
const Dtype x = in_data[idx];
const Dtype y = top_data[idx];
const Dtype dy = in_diff[idx];
if (x > 0) {
// positive branch: y = x^eta
// dy/deta = y * ln(x); dy/dx = eta * y / x
e_sum += dy * y * log(x);
out_diff[idx] = dy * eta[c] * y / x;
} else {
// non-positive branch: y = alpha * (exp(beta*x) - 1)
// dy/dalpha = exp(beta*x)-1; dy/dbeta = x*(y+alpha);
// dy/dx = beta * (y + alpha)
a_sum += dy * (exp(beta[c] * x) - 1);
b_sum += dy * x * (y + alpha[c]);
out_diff[idx] = dy * beta[c] * (y + alpha[c]);
}
}
out_diff_alpha[index] = a_sum;
out_diff_beta[index] = b_sum;
out_diff_eta[index] = e_sum;
}
}
// GPU forward pass of the PENLU layer: applies the per-channel
// piecewise activation (see PENLUForward) to bottom[0], writing top[0].
// Parameters: blobs_[0] = alpha, blobs_[1] = beta, blobs_[2] = eta.
template <typename Dtype>
void PENLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// per-channel spatial size (H*W for 4-D blobs)
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// with shared parameters, index math in the kernel collapses to c = 0
const int div_factor = channel_shared_ ? channels : 1;
const Dtype* alpha = this->blobs_[0]->gpu_data();
const Dtype* beta = this->blobs_[1]->gpu_data();
const Dtype* eta = this->blobs_[2]->gpu_data();
// For in-place computation: stash the input so Backward_gpu can still
// read the original bottom values after top overwrites them.
if (top[0] == bottom[0]) {
// exit(0);
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PENLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, div_factor, alpha, beta, eta);
CUDA_POST_KERNEL_CHECK;
}
// GPU backward pass of the PENLU layer.  A single kernel computes
// element-wise gradients for alpha/beta/eta into backward buffers AND
// the bottom diff; the buffers are then reduced either to a scalar
// (channel_shared_) via dot products with an all-ones multiplier, or
// per-channel via gemv.  Note: parameter diffs are ACCUMULATED
// (beta = 1 in the reductions), matching Caffe's gradient-accumulation
// convention.
template <typename Dtype>
void PENLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const int div_factor = channel_shared_ ? channels : 1;
const Dtype* alpha = this->blobs_[0]->gpu_data();
const Dtype* beta = this->blobs_[1]->gpu_data();
const Dtype* eta = this->blobs_[2]->gpu_data();
// For in-place computation: read the input saved by Forward_gpu.
if (top[0] == bottom[0]) {
// exit(0);
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param alpha
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computaion), we first compute param backward to
// keep top_diff unchanged.
// if (this->param_propagate_down_[0]) {
// NOTE(review): the propagate_down / param_propagate_down checks are
// disabled — gradients are always computed; confirm this is intended.
if (1) {
Dtype* alpha_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* beta_diff = this->blobs_[1]->mutable_gpu_diff();
Dtype* eta_diff = this->blobs_[2]->mutable_gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PENLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_alpha.mutable_gpu_diff(), backward_buff_beta.mutable_gpu_diff(), backward_buff_eta.mutable_gpu_diff(),
channels, dim, div_factor, top_data, bottom_diff, alpha, beta, eta);
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
// one scalar parameter each: reduce the whole buffer
Dtype dsum_alpha;
Dtype dsum_beta;
Dtype dsum_eta;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_alpha.gpu_diff(),
multiplier_.gpu_data(), &dsum_alpha);
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_beta.gpu_diff(),
multiplier_.gpu_data(), &dsum_beta);
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_eta.gpu_diff(),
multiplier_.gpu_data(), &dsum_eta);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum_alpha), alpha_diff);
caffe_gpu_add_scalar(this->blobs_[1]->count(), Dtype(dsum_beta), beta_diff );
caffe_gpu_add_scalar(this->blobs_[2]->count(), Dtype(dsum_eta), eta_diff );
} else {
// per-channel parameters: reduce the spatial dimension with gemv
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_alpha.gpu_diff(), multiplier_.gpu_data(), 1.,
alpha_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_beta.gpu_diff(), multiplier_.gpu_data(), 1.,
beta_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_eta.gpu_diff(), multiplier_.gpu_data(), 1.,
eta_diff);
}
}
// Propagate to bottom
// if (propagate_down[0]) {
// Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// // const Dtype* slope_data = this->blobs_[0]->gpu_data();
// // int div_factor = channel_shared_ ? channels : 1;
// // NOLINT_NEXT_LINE(whitespace/operators)
// M2PELUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
// CAFFE_CUDA_NUM_THREADS>>>(
// count, channels, dim, top_diff, bottom_data, bottom_diff, div_factor,
// top_data, alpha, beta, gamma);
// CUDA_POST_KERNEL_CHECK;
// }
}
INSTANTIATE_LAYER_GPU_FUNCS(PENLULayer);
} // namespace caffe
| a1d331d1c36531bbcbbf6e4ca0afb25b417c9ffd.cu | #include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/penlu_layer.hpp"
namespace caffe {
// CUDA kernele for forward
// PENLU forward kernel: y = x^eta(c) for x > 0, otherwise
// y = alpha(c) * (exp(beta(c) * x) - 1). One thread per element; the
// parameter index c is the channel (or 0 when parameters are shared,
// i.e. div_factor == channels).
template <typename Dtype>
__global__ void PENLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const int div_factor, const Dtype* alpha, const Dtype* beta , const Dtype* eta) {
CUDA_KERNEL_LOOP(index, n) {
const int c = (index / dim) % channels / div_factor;
const Dtype x = in[index];
out[index] = (x > 0) ? pow(x, eta[c])
: (exp(beta[c] * x) - 1) * alpha[c];
}
}
// CUDA kernel for bottom backward
// template <typename Dtype>
// __global__ void M2PELUBackward(const int n, const int channels, const int dim,
// const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
// const int div_factor,
// const Dtype* top_data, const Dtype* alpha, const Dtype* beta, const Dtype* gamma) {
// CUDA_KERNEL_LOOP(index, n) {
// int c = (index / dim) % channels / div_factor;
// out_diff[index] = in_diff[index] * ((in_data[index] > 0)
// + (in_data[index] <= 0) * beta[c] * ( top_data[index] + alpha[c]*exp(gamma[c]) ) );
// }
// }
// CUDA kernel for element-wise parameter backward
// CUDA kernel for element-wise parameter backward.
// One thread per (channel, spatial) element (n == channels * dim); each
// thread accumulates over the batch (rows) using rowPitch as the stride,
// and also writes the bottom diff for every element it visits.
//
// Gradients (y = PENLU(x)):
//   x > 0 : y = x^eta          -> dy/deta = y * log(x),  dy/dx = eta * y / x
//   x <= 0: y = alpha*(e^{beta x} - 1)
//                              -> dy/dalpha = e^{beta x} - 1
//                                 dy/dbeta  = x * (y + alpha)
//                                 dy/dx     = beta * (y + alpha)
//
// Bug fix: the previous implementation masked inactive branches by
// multiplying with (x > 0) / (x <= 0). In IEEE 754 arithmetic that does
// NOT zero the term: log(x) for x <= 0 and top/x at x == 0 produce
// NaN/Inf, and NaN * 0 == NaN, so out_diff_eta and out_diff were poisoned
// for every non-positive input. Explicit branches compute identical values
// on the valid domain and avoid the NaN propagation.
template <typename Dtype>
__global__ void PENLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff_alpha, Dtype* out_diff_beta, Dtype* out_diff_eta,
const int channels, const int dim, const int div_factor, const Dtype* top_data, Dtype* out_diff,
const Dtype* alpha, const Dtype* beta, const Dtype* eta ) {
CUDA_KERNEL_LOOP(index, n) {
const int c = (index / dim) % channels / div_factor;
Dtype dalpha = 0;
Dtype dbeta = 0;
Dtype deta = 0;
for (int k = 0; k < rows; k++) {
const int i = index + k * rowPitch;
const Dtype x = in_data[i];
if (x > 0) {
// positive branch contributes only to the eta gradient
deta += in_diff[i] * (top_data[i] * log(x));
out_diff[i] = in_diff[i] * (eta[c] * (top_data[i] / x));
} else {
// non-positive branch contributes to alpha and beta gradients
dalpha += in_diff[i] * (exp(beta[c] * x) - 1);
dbeta += in_diff[i] * (x * (top_data[i] + alpha[c]));
out_diff[i] = in_diff[i] * (beta[c] * (top_data[i] + alpha[c]));
}
}
out_diff_alpha[index] = dalpha;
out_diff_beta[index] = dbeta;
out_diff_eta[index] = deta;
}
}
// Forward pass of the PENLU activation on the GPU: applies
// y = x^eta (x > 0) / y = alpha*(exp(beta*x)-1) (x <= 0) element-wise,
// with parameters indexed per channel (or shared across channels).
template <typename Dtype>
void PENLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// dim = product of spatial axes; count(2) skips num and channel axes
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// div_factor maps every channel onto parameter index 0 when shared
const int div_factor = channel_shared_ ? channels : 1;
const Dtype* alpha = this->blobs_[0]->gpu_data();
const Dtype* beta = this->blobs_[1]->gpu_data();
const Dtype* eta = this->blobs_[2]->gpu_data();
// For in-place computation: save the input so Backward_gpu can still read
// it after top_data overwrites bottom_data
if (top[0] == bottom[0]) {
// exit(0);
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PENLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, div_factor, alpha, beta, eta);
CUDA_POST_KERNEL_CHECK;
}
// Backward pass of the PENLU activation on the GPU (CUDA build).
// A single element-wise kernel produces per-element diffs for the three
// learnable parameters (alpha, beta, eta) and writes the bottom diff; the
// per-element parameter diffs are then reduced either to one scalar
// (channel_shared_) via dot products with a ones vector, or per channel
// via gemv against the ones multiplier.
template <typename Dtype>
void PENLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const int count = bottom[0]->count();
// dim = product of spatial axes; count(2) skips num and channel axes
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// div_factor maps every channel onto parameter index 0 when shared
const int div_factor = channel_shared_ ? channels : 1;
const Dtype* alpha = this->blobs_[0]->gpu_data();
const Dtype* beta = this->blobs_[1]->gpu_data();
const Dtype* eta = this->blobs_[2]->gpu_data();
// For in-place computation: bottom[0] now holds the *output*, so read the
// copy of the input that Forward_gpu stashed in bottom_memory_.
if (top[0] == bottom[0]) {
// exit(0);
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param alpha
// Since to write bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computaion), we first compute param backward to
// keep top_diff unchanged.
// if (this->param_propagate_down_[0]) {
// NOTE(review): param propagation is unconditionally enabled here; the
// original propagate_down check is commented out above.
if (1) {
Dtype* alpha_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* beta_diff = this->blobs_[1]->mutable_gpu_diff();
Dtype* eta_diff = this->blobs_[2]->mutable_gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// one thread per (channel, spatial) element; the kernel loops over the
// batch dimension using top[0]->offset(1) as the row pitch
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PENLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS>>>(
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_alpha.mutable_gpu_diff(), backward_buff_beta.mutable_gpu_diff(), backward_buff_eta.mutable_gpu_diff(),
channels, dim, div_factor, top_data, bottom_diff, alpha, beta, eta);
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
// collapse the channels*dim element diffs to one scalar per parameter
Dtype dsum_alpha;
Dtype dsum_beta;
Dtype dsum_eta;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_alpha.gpu_diff(),
multiplier_.gpu_data(), &dsum_alpha);
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_beta.gpu_diff(),
multiplier_.gpu_data(), &dsum_beta);
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_eta.gpu_diff(),
multiplier_.gpu_data(), &dsum_eta);
// accumulate into the existing parameter diffs
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum_alpha), alpha_diff);
caffe_gpu_add_scalar(this->blobs_[1]->count(), Dtype(dsum_beta), beta_diff );
caffe_gpu_add_scalar(this->blobs_[2]->count(), Dtype(dsum_eta), eta_diff );
} else {
// per-channel reduction: (channels x dim) matrix times ones vector,
// accumulated into the existing diffs (beta = 1.)
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_alpha.gpu_diff(), multiplier_.gpu_data(), 1.,
alpha_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_beta.gpu_diff(), multiplier_.gpu_data(), 1.,
beta_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_eta.gpu_diff(), multiplier_.gpu_data(), 1.,
eta_diff);
}
}
// Propagate to bottom
// NOTE(review): the bottom diff is written by PENLUParamBackward above; the
// dedicated backward kernel below is retained but disabled.
// if (propagate_down[0]) {
// Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// // const Dtype* slope_data = this->blobs_[0]->gpu_data();
// // int div_factor = channel_shared_ ? channels : 1;
// // NOLINT_NEXT_LINE(whitespace/operators)
// M2PELUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
// CAFFE_CUDA_NUM_THREADS>>>(
// count, channels, dim, top_diff, bottom_data, bottom_diff, div_factor,
// top_data, alpha, beta, gamma);
// CUDA_POST_KERNEL_CHECK;
// }
}
INSTANTIATE_LAYER_GPU_FUNCS(PENLULayer);
} // namespace caffe
|
7c022a4ef6ca3d76e1389a0f0583e8a327aea522.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_4_right;
int xdim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_4_right;
int ydim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_4_right;
int xdim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_4_right;
int ydim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim0_update_halo_kernel2_zvel_plus_4_right * \
ydim0_update_halo_kernel2_zvel_plus_4_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim1_update_halo_kernel2_zvel_plus_4_right * \
ydim1_update_halo_kernel2_zvel_plus_4_right * (z))
// user function
// user function: for each field enabled in `fields`, copy the value located
// 4 cells away in the -x direction onto the current (halo) cell. The
// OPS_ACC* macros translate the 3D relative offset into a flat index using
// the per-dat x/y dimensions held in __constant__ memory.
__device__
inline void
update_halo_kernel2_zvel_plus_4_right_gpu(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(-4, 0, 0)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(-4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Kernel wrapper generated by ops.py: maps a 3D thread grid onto the halo
// region. Each thread offsets the two dat pointers to its own (x,y,z) cell
// and, after a bounds check against the iteration range, applies the user
// function above.
__global__ void ops_update_halo_kernel2_zvel_plus_4_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// advance base pointers to this thread's cell (unit stride in each dim)
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right *
ydim0_update_halo_kernel2_zvel_plus_4_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right *
ydim1_update_halo_kernel2_zvel_plus_4_right;
// guard: the grid may overhang the iteration range
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
// Host stub (HIP build): computes the local iteration range (MPI-decomposed
// or not), refreshes the __constant__ dat dimensions when they change,
// stages the NUM_FIELDS flag array into constant memory, sets up base
// pointers into each dat, performs halo exchanges, launches the kernel,
// and records timing/transfer diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
// lazy-execution variant: unpack the arguments recorded in the descriptor
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 53))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(53, "update_halo_kernel2_zvel_plus_4_right");
OPS_kernels[53].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// clip the global range against this rank's portion of the decomposition
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
// only re-upload the __constant__ dat dimensions when they changed,
// tracked via the *_h host-side shadow copies
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_right_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_right_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_right_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_right_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_plus_4_right_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_plus_4_right_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_plus_4_right_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
// stage the field-flag array into the shared constant buffer and copy it
// to the device
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers: byte offset of the range start inside each dat
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_right), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
// synchronize only when timing so diagnostics don't distort overlap
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[53].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
// Lazy-execution entry point: instead of running immediately, record the
// loop's arguments and a hash into a descriptor and enqueue it; the
// _execute variant above runs it later. arg2 (the field flags) is deep
// copied since the caller's buffer may change before execution.
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 53;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 53;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
// snapshot the flag array so later mutation by the caller is harmless
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(53, "update_halo_kernel2_zvel_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 7c022a4ef6ca3d76e1389a0f0583e8a327aea522.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_4_right;
int xdim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_4_right;
int ydim0_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_4_right;
int xdim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_4_right;
int ydim1_update_halo_kernel2_zvel_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim0_update_halo_kernel2_zvel_plus_4_right * \
ydim0_update_halo_kernel2_zvel_plus_4_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_plus_4_right * (y) + \
xdim1_update_halo_kernel2_zvel_plus_4_right * \
ydim1_update_halo_kernel2_zvel_plus_4_right * (z))
// user function
// user function: for each field enabled in `fields`, copy the value located
// 4 cells away in the -x direction onto the current (halo) cell. The
// OPS_ACC* macros translate the 3D relative offset into a flat index using
// the per-dat x/y dimensions held in __constant__ memory.
__device__
inline void
update_halo_kernel2_zvel_plus_4_right_gpu(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(-4, 0, 0)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(-4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Kernel wrapper generated by ops.py: maps a 3D thread grid onto the halo
// region. Each thread offsets the two dat pointers to its own (x,y,z) cell
// and, after a bounds check against the iteration range, applies the user
// function above.
__global__ void ops_update_halo_kernel2_zvel_plus_4_right(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// advance base pointers to this thread's cell (unit stride in each dim)
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_right *
ydim0_update_halo_kernel2_zvel_plus_4_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_right *
ydim1_update_halo_kernel2_zvel_plus_4_right;
// guard: the grid may overhang the iteration range
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
// Host stub (CUDA build): computes the local iteration range (MPI-decomposed
// or not), refreshes the __constant__ dat dimensions when they change,
// stages the NUM_FIELDS flag array into constant memory, sets up base
// pointers into each dat, performs halo exchanges, launches the kernel,
// and records timing/transfer diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
// lazy-execution variant: unpack the arguments recorded in the descriptor
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 53))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(53, "update_halo_kernel2_zvel_plus_4_right");
OPS_kernels[53].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// clip the global range against this rank's portion of the decomposition
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
// only re-upload the __constant__ dat dimensions when they changed,
// tracked via the *_h host-side shadow copies
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_right_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_right_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_right_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_right_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_plus_4_right_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_plus_4_right_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_plus_4_right_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
// stage the field-flag array into the shared constant buffer and copy it
// to the device
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers: byte offset of the range start inside each dat
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_zvel_plus_4_right<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
// synchronize only when timing so diagnostics don't distort overlap
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[53].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[53].mpi_time += t2 - t1;
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[53].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
// Lazy-execution entry point: instead of running immediately, record the
// loop's arguments and a hash into a descriptor and enqueue it; the
// _execute variant above runs it later. arg2 (the field flags) is deep
// copied since the caller's buffer may change before execution.
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 53;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 53;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
// snapshot the flag array so later mutation by the caller is harmless
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(53, "update_halo_kernel2_zvel_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
2119b9f4ad3a1865bcb40a455b3cf51f96b6e1e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
/*!
* \file mpcd/CommunicatorGPU.cu
* \brief Implementation of communication algorithms on the GPU
*/
#ifdef ENABLE_MPI
#include "CommunicatorGPU.cuh"
#include "CommunicatorUtilities.h"
#include "ReductionOperators.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#pragma GCC diagnostic pop
#if __CUDACC_VER_MAJOR__ >= 11
#include <hipcub/hipcub.hpp>
#else
#include "hoomd/extern/cub/hipcub/hipcub.hpp"
#endif
namespace hoomd
{
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Select a particle for migration
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* Checks for particles being out of bounds, and aggregates send flags.
*/
//! Flag out-of-bounds particles for migration.
/*!
 * One thread per particle: compare the particle position against the local
 * box bounds and OR together the send_mask direction bits for every axis on
 * which the particle has left the box. A particle inside the box gets 0.
 */
__global__ void
stage_particles(unsigned int* d_comm_flag, const Scalar4* d_pos, unsigned int N, const BoxDim box)
{
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= N)
return;
const Scalar4 postype = d_pos[tid];
const Scalar3 r = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 lo = box.getLo();
const Scalar3 hi = box.getHi();
// per-axis: high bound wins over low bound, in-bounds contributes nothing
unsigned int flags = 0;
flags |= (r.x >= hi.x) ? static_cast<unsigned int>(mpcd::detail::send_mask::east)
: (r.x < lo.x) ? static_cast<unsigned int>(mpcd::detail::send_mask::west)
: 0u;
flags |= (r.y >= hi.y) ? static_cast<unsigned int>(mpcd::detail::send_mask::north)
: (r.y < lo.y) ? static_cast<unsigned int>(mpcd::detail::send_mask::south)
: 0u;
flags |= (r.z >= hi.z) ? static_cast<unsigned int>(mpcd::detail::send_mask::up)
: (r.z < lo.z) ? static_cast<unsigned int>(mpcd::detail::send_mask::down)
: 0u;
d_comm_flag[tid] = flags;
}
} // end namespace kernel
//! Functor to select a particle for migration
struct get_migrate_key : public thrust::unary_function<const unsigned int, unsigned int>
    {
    const uint3 my_pos;             //!< My domain decomposition position
    const Index3D di;               //!< Domain indexer
    const unsigned int mask;        //!< Mask of allowed directions
    const unsigned int* cart_ranks; //!< Rank lookup table

    //! Constructor
    /*!
     * \param _my_pos Domain decomposition position
     * \param _di Domain indexer
     * \param _mask Mask of allowed directions
     * \param _cart_ranks Rank lookup table
     */
    get_migrate_key(const uint3 _my_pos,
                    const Index3D _di,
                    const unsigned int _mask,
                    const unsigned int* _cart_ranks)
        : my_pos(_my_pos), di(_di), mask(_mask), cart_ranks(_cart_ranks)
        {
        }

    //! Generate key for a sent particle
    /*!
     * \param element Particle data being sent
     *
     * \returns the destination rank: the neighbor offset (-1/0/+1 per axis)
     * is derived from the particle's comm flags restricted by \a mask, added
     * to this rank's grid position with periodic wrapping, and looked up in
     * \a cart_ranks.
     */
    __device__ __forceinline__ unsigned int operator()(const mpcd::detail::pdata_element& element)
        {
        const unsigned int flags = element.comm_flag;
        // neighbor offset along each axis; a direction only counts if it is
        // both requested by the particle and allowed by the stage mask
        int ix, iy, iz;
        ix = iy = iz = 0;
        if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::east))
            && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::east)))
            ix = 1;
        else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::west))
                 && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::west)))
            ix = -1;

        if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::north))
            && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::north)))
            iy = 1;
        else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::south))
                 && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::south)))
            iy = -1;

        if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::up))
            && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::up)))
            iz = 1;
        else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::down))
                 && (mask & static_cast<unsigned int>(mpcd::detail::send_mask::down)))
            iz = -1;

        // wrap the neighbor coordinate periodically through the domain grid
        int i = my_pos.x;
        int j = my_pos.y;
        int k = my_pos.z;
        i += ix;
        if (i == (int)di.getW())
            i = 0;
        else if (i < 0)
            i += di.getW();

        j += iy;
        if (j == (int)di.getH())
            j = 0;
        else if (j < 0)
            j += di.getH();

        k += iz;
        if (k == (int)di.getD())
            k = 0;
        else if (k < 0)
            k += di.getD();

        return cart_ranks[di(i, j, k)];
        }
    };
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* \returns Accumulated communication flags of all particles
*/
// Host wrapper: clamps the requested block size to the kernel's maximum and
// launches one thread per particle to fill d_comm_flag.
hipError_t mpcd::gpu::stage_particles(unsigned int* d_comm_flag,
                                      const Scalar4* d_pos,
                                      const unsigned int N,
                                      const BoxDim& box,
                                      const unsigned int block_size)
    {
    unsigned int max_block_size;
    hipFuncAttributes attr;
    // NOTE(review): return status of hipFuncGetAttributes is not checked;
    // on failure attr.maxThreadsPerBlock is read uninitialized — confirm
    // whether the surrounding error-checking policy covers this.
    hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_particles);
    max_block_size = attr.maxThreadsPerBlock;

    unsigned int run_block_size = min(block_size, max_block_size);

    // ceil-style grid: at least one block even when N == 0 (kernel guards)
    dim3 grid(N / run_block_size + 1);
    hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_particles), dim3(grid), dim3(run_block_size), 0, 0, d_comm_flag, d_pos, N, box);

    return hipSuccess;
    }
/*!
* \param d_sendbuf Particle data buffer to sort
* \param d_neigh_send Neighbor ranks that particles are being sent to (output)
* \param d_num_send Number of particles being sent to each neighbor
* \param d_tmp_keys Temporary array (size \a Nsend) used for sorting
* \param grid_pos Grid position of the rank
* \param di Domain decomposition indexer
* \param mask Sending mask for the current stage
* \param d_cart_ranks Cartesian array of domains
* \param Nsend Number of particles in send buffer
*
* \returns The number of unique neighbor ranks to send to
*
* The communication flags in \a d_sendbuf are first transformed into a destination
* rank (see mpcd::gpu::get_migrate_key). The send buffer is then sorted using
* the destination rank as the key. Run-length encoding is then performed to
* determine the number of particles going to each destination rank, and how
* many ranks will be sent to.
*/
// Sort the send buffer by destination rank and count particles per rank.
// Returns the number of unique destination (neighbor) ranks; d_neigh_send
// and d_num_send hold the rank ids and per-rank counts for the first
// `num_neigh` entries.
size_t mpcd::gpu::sort_comm_send_buffer(mpcd::detail::pdata_element* d_sendbuf,
                                        unsigned int* d_neigh_send,
                                        unsigned int* d_num_send,
                                        unsigned int* d_tmp_keys,
                                        const uint3 grid_pos,
                                        const Index3D& di,
                                        const unsigned int mask,
                                        const unsigned int* d_cart_ranks,
                                        const unsigned int Nsend)
    {
    // transform extracted communication flags into destination rank
    thrust::device_ptr<mpcd::detail::pdata_element> sendbuf(d_sendbuf);
    thrust::device_ptr<unsigned int> keys(d_tmp_keys);
    thrust::transform(sendbuf,
                      sendbuf + Nsend,
                      keys,
                      mpcd::gpu::get_migrate_key(grid_pos, di, mask, d_cart_ranks));

    // sort the destination ranks (keys) and permute the particles with them
    thrust::sort_by_key(keys, keys + Nsend, sendbuf);

    // run length encode to get the number going to each rank: reduce runs of
    // equal keys over a constant-1 iterator
    thrust::device_ptr<unsigned int> neigh_send(d_neigh_send);
    thrust::device_ptr<unsigned int> num_send(d_num_send);
    size_t num_neigh = thrust::reduce_by_key(keys,
                                             keys + Nsend,
                                             thrust::constant_iterator<int>(1),
                                             neigh_send,
                                             num_send)
                           .first
                       - neigh_send;

    return num_neigh;
    }
/*!
* \param d_req_flags Reduced requested communication flags (output)
* \param d_tmp Temporary storage for reduction
* \param tmp_bytes Number of temporary storage bytes requested
* \param d_comm_flags Communication flags to reduce
* \param N Number of local particles
*
* Bitwise OR reduction is performed on the communication flags to determine
* requested migration direction.
*
* \note This function must be called \b twice. The first call sizes the temporary
* arrays. The caller must then allocate the necessary temporary storage, and then
* call again to perform the reduction.
*/
void mpcd::gpu::reduce_comm_flags(unsigned int* d_req_flags,
void* d_tmp,
size_t& tmp_bytes,
const unsigned int* d_comm_flags,
const unsigned int N)
{
mpcd::ops::BitwiseOr bit_or;
hipcub::DeviceReduce::Reduce(d_tmp,
tmp_bytes,
d_comm_flags,
d_req_flags,
N,
bit_or,
(unsigned int)0);
}
namespace mpcd
{
namespace gpu
{
//! Wrap a particle in a pdata_element
struct wrap_particle_op
: public thrust::unary_function<const mpcd::detail::pdata_element, mpcd::detail::pdata_element>
{
const BoxDim box; //!< The box for which we are applying boundary conditions
//! Constructor
/*!
* \param _box Shifted simulation box for wrapping
*/
wrap_particle_op(const BoxDim _box) : box(_box) { }
//! Wrap position information inside particle data element
/*!
* \param p Particle data element
* \returns The particle data element with wrapped coordinates
*/
__device__ mpcd::detail::pdata_element operator()(const mpcd::detail::pdata_element p)
{
mpcd::detail::pdata_element ret = p;
int3 image = make_int3(0, 0, 0);
box.wrap(ret.pos, image);
return ret;
}
};
} // end namespace gpu
} // end namespace mpcd
/*!
* \param n_recv Number of particles in buffer
* \param d_in Buffer of particle data elements
* \param box Box for which to apply boundary conditions
*/
void mpcd::gpu::wrap_particles(const unsigned int n_recv,
mpcd::detail::pdata_element* d_in,
const BoxDim& box)
{
// Wrap device ptr
thrust::device_ptr<mpcd::detail::pdata_element> in_ptr(d_in);
// Apply box wrap to input buffer
thrust::transform(in_ptr, in_ptr + n_recv, in_ptr, mpcd::gpu::wrap_particle_op(box));
}
} // end namespace hoomd
#endif // ENABLE_MPI
| 2119b9f4ad3a1865bcb40a455b3cf51f96b6e1e0.cu | // Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
/*!
* \file mpcd/CommunicatorGPU.cu
* \brief Implementation of communication algorithms on the GPU
*/
#ifdef ENABLE_MPI
#include "CommunicatorGPU.cuh"
#include "CommunicatorUtilities.h"
#include "ReductionOperators.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#pragma GCC diagnostic pop
#if __CUDACC_VER_MAJOR__ >= 11
#include <cub/device/device_reduce.cuh>
#else
#include "hoomd/extern/cub/cub/device/device_reduce.cuh"
#endif
namespace hoomd
{
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Select a particle for migration
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* Checks for particles being out of bounds, and aggregates send flags.
*/
__global__ void
stage_particles(unsigned int* d_comm_flag, const Scalar4* d_pos, unsigned int N, const BoxDim box)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
const Scalar4 postype = d_pos[idx];
const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const Scalar3 lo = box.getLo();
const Scalar3 hi = box.getHi();
unsigned int flags = 0;
if (pos.x >= hi.x)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::east);
else if (pos.x < lo.x)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::west);
if (pos.y >= hi.y)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::north);
else if (pos.y < lo.y)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::south);
if (pos.z >= hi.z)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::up);
else if (pos.z < lo.z)
flags |= static_cast<unsigned int>(mpcd::detail::send_mask::down);
d_comm_flag[idx] = flags;
}
} // end namespace kernel
//! Functor to select a particle for migration
struct get_migrate_key : public thrust::unary_function<const unsigned int, unsigned int>
{
const uint3 my_pos; //!< My domain decomposition position
const Index3D di; //!< Domain indexer
const unsigned int mask; //!< Mask of allowed directions
const unsigned int* cart_ranks; //!< Rank lookup table
//! Constructor
/*!
* \param _my_pos Domain decomposition position
* \param _di Domain indexer
* \param _mask Mask of allowed directions
* \param _cart_ranks Rank lookup table
*/
get_migrate_key(const uint3 _my_pos,
const Index3D _di,
const unsigned int _mask,
const unsigned int* _cart_ranks)
: my_pos(_my_pos), di(_di), mask(_mask), cart_ranks(_cart_ranks)
{
}
//! Generate key for a sent particle
/*!
* \param element Particle data being sent
*/
__device__ __forceinline__ unsigned int operator()(const mpcd::detail::pdata_element& element)
{
const unsigned int flags = element.comm_flag;
int ix, iy, iz;
ix = iy = iz = 0;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::east))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::east)))
ix = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::west))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::west)))
ix = -1;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::north))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::north)))
iy = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::south))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::south)))
iy = -1;
if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::up))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::up)))
iz = 1;
else if ((flags & static_cast<unsigned int>(mpcd::detail::send_mask::down))
&& (mask & static_cast<unsigned int>(mpcd::detail::send_mask::down)))
iz = -1;
int i = my_pos.x;
int j = my_pos.y;
int k = my_pos.z;
i += ix;
if (i == (int)di.getW())
i = 0;
else if (i < 0)
i += di.getW();
j += iy;
if (j == (int)di.getH())
j = 0;
else if (j < 0)
j += di.getH();
k += iz;
if (k == (int)di.getD())
k = 0;
else if (k < 0)
k += di.getD();
return cart_ranks[di(i, j, k)];
}
};
} // end namespace gpu
} // end namespace mpcd
/*!
* \param d_comm_flag Communication flags to write out
* \param d_pos Device array of particle positions
* \param N Number of local particles
* \param box Local box
*
* \returns Accumulated communication flags of all particles
*/
cudaError_t mpcd::gpu::stage_particles(unsigned int* d_comm_flag,
const Scalar4* d_pos,
const unsigned int N,
const BoxDim& box,
const unsigned int block_size)
{
unsigned int max_block_size;
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_particles);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N / run_block_size + 1);
mpcd::gpu::kernel::stage_particles<<<grid, run_block_size>>>(d_comm_flag, d_pos, N, box);
return cudaSuccess;
}
/*!
* \param d_sendbuf Particle data buffer to sort
* \param d_neigh_send Neighbor ranks that particles are being sent to (output)
* \param d_num_send Number of particles being sent to each neighbor
* \param d_tmp_keys Temporary array (size \a Nsend) used for sorting
* \param grid_pos Grid position of the rank
* \param di Domain decomposition indexer
* \param mask Sending mask for the current stage
* \param d_cart_ranks Cartesian array of domains
* \param Nsend Number of particles in send buffer
*
* \returns The number of unique neighbor ranks to send to
*
* The communication flags in \a d_sendbuf are first transformed into a destination
* rank (see mpcd::gpu::get_migrate_key). The send buffer is then sorted using
* the destination rank as the key. Run-length encoding is then performed to
* determine the number of particles going to each destination rank, and how
* many ranks will be sent to.
*/
size_t mpcd::gpu::sort_comm_send_buffer(mpcd::detail::pdata_element* d_sendbuf,
unsigned int* d_neigh_send,
unsigned int* d_num_send,
unsigned int* d_tmp_keys,
const uint3 grid_pos,
const Index3D& di,
const unsigned int mask,
const unsigned int* d_cart_ranks,
const unsigned int Nsend)
{
// transform extracted communication flags into destination rank
thrust::device_ptr<mpcd::detail::pdata_element> sendbuf(d_sendbuf);
thrust::device_ptr<unsigned int> keys(d_tmp_keys);
thrust::transform(sendbuf,
sendbuf + Nsend,
keys,
mpcd::gpu::get_migrate_key(grid_pos, di, mask, d_cart_ranks));
// sort the destination ranks
thrust::sort_by_key(keys, keys + Nsend, sendbuf);
// run length encode to get the number going to each rank
thrust::device_ptr<unsigned int> neigh_send(d_neigh_send);
thrust::device_ptr<unsigned int> num_send(d_num_send);
size_t num_neigh = thrust::reduce_by_key(keys,
keys + Nsend,
thrust::constant_iterator<int>(1),
neigh_send,
num_send)
.first
- neigh_send;
return num_neigh;
}
/*!
* \param d_req_flags Reduced requested communication flags (output)
* \param d_tmp Temporary storage for reduction
* \param tmp_bytes Number of temporary storage bytes requested
* \param d_comm_flags Communication flags to reduce
* \param N Number of local particles
*
* Bitwise OR reduction is performed on the communication flags to determine
* requested migration direction.
*
* \note This function must be called \b twice. The first call sizes the temporary
* arrays. The caller must then allocate the necessary temporary storage, and then
* call again to perform the reduction.
*/
void mpcd::gpu::reduce_comm_flags(unsigned int* d_req_flags,
void* d_tmp,
size_t& tmp_bytes,
const unsigned int* d_comm_flags,
const unsigned int N)
{
mpcd::ops::BitwiseOr bit_or;
cub::DeviceReduce::Reduce(d_tmp,
tmp_bytes,
d_comm_flags,
d_req_flags,
N,
bit_or,
(unsigned int)0);
}
namespace mpcd
{
namespace gpu
{
//! Wrap a particle in a pdata_element
struct wrap_particle_op
: public thrust::unary_function<const mpcd::detail::pdata_element, mpcd::detail::pdata_element>
{
const BoxDim box; //!< The box for which we are applying boundary conditions
//! Constructor
/*!
* \param _box Shifted simulation box for wrapping
*/
wrap_particle_op(const BoxDim _box) : box(_box) { }
//! Wrap position information inside particle data element
/*!
* \param p Particle data element
* \returns The particle data element with wrapped coordinates
*/
__device__ mpcd::detail::pdata_element operator()(const mpcd::detail::pdata_element p)
{
mpcd::detail::pdata_element ret = p;
int3 image = make_int3(0, 0, 0);
box.wrap(ret.pos, image);
return ret;
}
};
} // end namespace gpu
} // end namespace mpcd
/*!
* \param n_recv Number of particles in buffer
* \param d_in Buffer of particle data elements
* \param box Box for which to apply boundary conditions
*/
void mpcd::gpu::wrap_particles(const unsigned int n_recv,
mpcd::detail::pdata_element* d_in,
const BoxDim& box)
{
// Wrap device ptr
thrust::device_ptr<mpcd::detail::pdata_element> in_ptr(d_in);
// Apply box wrap to input buffer
thrust::transform(in_ptr, in_ptr + n_recv, in_ptr, mpcd::gpu::wrap_particle_op(box));
}
} // end namespace hoomd
#endif // ENABLE_MPI
|
3fb3e7c1d9f998d365775ed49be50e1ffb9e1ba0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ###########################################################################
* Copyright (c) 2010, Los Alamos National Security, LLC.
* All rights reserved.
*
* Copyright 2010. Los Alamos National Security, LLC. This software was
* produced under U.S. Government contract DE-AC52-06NA25396 for Los
* Alamos National Laboratory (LANL), which is operated by Los Alamos
* National Security, LLC for the U.S. Department of Energy. The
* U.S. Government has rights to use, reproduce, and distribute this
* software. NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY,
* LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY
* FOR THE USE OF THIS SOFTWARE. If software is modified to produce
* derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Additionally, redistribution and use in source and binary forms,
* with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Los Alamos National Security, LLC, Los
* Alamos National Laboratory, LANL, the U.S. Government, nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ###########################################################################
*
* Notes
*
* #####
*/
#include "scout/Runtime/opengl/glyph_vertex.h"
using namespace scout;
#define NUM_THREADS_PER_BLOCK 128
#define NUM_BLOCKS 8
// moves points out from center then back in
__global__ void movePoints(unsigned int npoints, glyph_vertex* abo,
unsigned int counter)
{
bool out = false;
if (counter%100 < 50) out = true;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < npoints) {
if (out) {
if (abo[tid].x > 500) {
abo[tid].x += 5;
} else {
abo[tid].x -= 5;
}
if (abo[tid].y > 500) {
abo[tid].y += 5;
} else {
abo[tid].y -= 5;
}
} else {
if (abo[tid].x > 500) {
abo[tid].x -= 5;
} else {
abo[tid].x += 5;
}
if (abo[tid].y > 500) {
abo[tid].y -= 5;
} else {
abo[tid].y += 5;
}
}
}
}
void compute(unsigned int npoints, glyph_vertex* a, unsigned int counter)
{
glyph_vertex *abo;
hipMalloc(&abo, npoints*sizeof(glyph_vertex));
hipMemcpy(abo, a, npoints*sizeof(glyph_vertex),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( movePoints), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, npoints, abo, counter);
hipMemcpy(a, abo, npoints*sizeof(glyph_vertex),hipMemcpyDeviceToHost);
}
| 3fb3e7c1d9f998d365775ed49be50e1ffb9e1ba0.cu | /*
* ###########################################################################
* Copyright (c) 2010, Los Alamos National Security, LLC.
* All rights reserved.
*
* Copyright 2010. Los Alamos National Security, LLC. This software was
* produced under U.S. Government contract DE-AC52-06NA25396 for Los
* Alamos National Laboratory (LANL), which is operated by Los Alamos
* National Security, LLC for the U.S. Department of Energy. The
* U.S. Government has rights to use, reproduce, and distribute this
* software. NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY,
* LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY
* FOR THE USE OF THIS SOFTWARE. If software is modified to produce
* derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Additionally, redistribution and use in source and binary forms,
* with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Los Alamos National Security, LLC, Los
* Alamos National Laboratory, LANL, the U.S. Government, nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ###########################################################################
*
* Notes
*
* #####
*/
#include "scout/Runtime/opengl/glyph_vertex.h"
using namespace scout;
#define NUM_THREADS_PER_BLOCK 128
#define NUM_BLOCKS 8
// moves points out from center then back in
__global__ void movePoints(unsigned int npoints, glyph_vertex* abo,
unsigned int counter)
{
bool out = false;
if (counter%100 < 50) out = true;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < npoints) {
if (out) {
if (abo[tid].x > 500) {
abo[tid].x += 5;
} else {
abo[tid].x -= 5;
}
if (abo[tid].y > 500) {
abo[tid].y += 5;
} else {
abo[tid].y -= 5;
}
} else {
if (abo[tid].x > 500) {
abo[tid].x -= 5;
} else {
abo[tid].x += 5;
}
if (abo[tid].y > 500) {
abo[tid].y -= 5;
} else {
abo[tid].y += 5;
}
}
}
}
void compute(unsigned int npoints, glyph_vertex* a, unsigned int counter)
{
glyph_vertex *abo;
cudaMalloc(&abo, npoints*sizeof(glyph_vertex));
cudaMemcpy(abo, a, npoints*sizeof(glyph_vertex),cudaMemcpyHostToDevice);
movePoints<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(npoints, abo, counter);
cudaMemcpy(a, abo, npoints*sizeof(glyph_vertex),cudaMemcpyDeviceToHost);
}
|
d9eb313937b825bd84c986fb585b1a66905ecc93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "CubicInterp.cuh"
#include "Helper.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template <class T> __global__ void RemapKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch);
template <class T> __global__ void RemapReverseKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsdestination, T defvalue, int batch);
template<bool cubicinterp> __global__ void RemapInterpolated2DKernel(cudaTex t_input, tfloat* d_output, float2* d_addresses, int n);
//////////////////
//Data remapping//
//////////////////
template <class T> void d_Remap(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch)
{
size_t TpB = 192;
size_t totalblocks = tmin((elementsmapped + TpB - 1) / TpB, (size_t)32768);
dim3 grid = dim3((uint)totalblocks);
RemapKernel<T> << <grid, (uint)TpB >> > (d_input, d_map, d_output, elementsmapped, elementsoriginal, defvalue, batch);
}
template void d_Remap<tfloat>(tfloat* d_input, size_t* d_map, tfloat* d_output, size_t elementsmapped, size_t elementsoriginal, tfloat defvalue, int batch);
template void d_Remap<tcomplex>(tcomplex* d_input, size_t* d_map, tcomplex* d_output, size_t elementsmapped, size_t elementsoriginal, tcomplex defvalue, int batch);
template void d_Remap<int>(int* d_input, size_t* d_map, int* d_output, size_t elementsmapped, size_t elementsoriginal, int defvalue, int batch);
template <class T> void d_RemapReverse(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsdestination, T defvalue, int batch)
{
size_t TpB = 192;
size_t totalblocks = tmin((elementsmapped + TpB - 1) / TpB, (size_t)32768);
dim3 grid = dim3((uint)totalblocks);
RemapReverseKernel<T> << <grid, (uint)TpB >> > (d_input, d_map, d_output, elementsmapped, elementsdestination, defvalue, batch);
}
template void d_RemapReverse<tfloat>(tfloat* d_input, size_t* d_map, tfloat* d_output, size_t elementsmapped, size_t elementsdestination, tfloat defvalue, int batch);
template void d_RemapReverse<int>(int* d_input, size_t* d_map, int* d_output, size_t elementsmapped, size_t elementsdestination, int defvalue, int batch);
template <class T> void h_Remap(T* h_input, size_t* h_map, T* h_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch)
{
T* d_input = (T*)CudaMallocFromHostArray(h_input, elementsoriginal * batch * sizeof(T));
size_t* d_map = (size_t*)CudaMallocFromHostArray(h_map, elementsmapped * sizeof(size_t));
T* d_output;
hipMalloc((void**)&d_output, elementsmapped * batch * sizeof(T));
d_Remap(d_input, d_map, d_output, elementsmapped, elementsoriginal, defvalue, batch);
hipMemcpy(h_output, d_output, elementsmapped * batch * sizeof(T), hipMemcpyDeviceToHost);
hipFree(d_input);
hipFree(d_map);
hipFree(d_output);
}
template void h_Remap<tfloat>(tfloat* d_input, size_t* d_map, tfloat* d_output, size_t elementsmapped, size_t elementsoriginal, tfloat defvalue, int batch);
template void h_Remap<int>(int* d_input, size_t* d_map, int* d_output, size_t elementsmapped, size_t elementsoriginal, int defvalue, int batch);
template <class T> __global__ void RemapKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch)
{
size_t address;
for (size_t id = blockIdx.x * blockDim.x + threadIdx.x;
id < elementsmapped;
id += blockDim.x * gridDim.x)
{
address = d_map[id];
for (size_t b = 0; b < batch; b++)
d_output[id + elementsmapped * b] = d_input[address + elementsoriginal * b];
}
}
template <class T> __global__ void RemapReverseKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsdestination, T defvalue, int batch)
{
size_t address;
for (size_t id = blockIdx.x * blockDim.x + threadIdx.x;
id < elementsmapped;
id += blockDim.x * gridDim.x)
{
address = d_map[id];
for (size_t b = 0; b < batch; b++)
d_output[address + elementsdestination * b] = d_input[id + elementsmapped * b];
}
}
/////////////////////
//Texture remapping//
/////////////////////
void d_RemapInterpolated2D(tfloat* d_input, int2 dimsinput, tfloat* d_output, float2* d_addresses, int n, T_INTERP_MODE mode)
{
hipArray* a_input;
cudaTex t_input;
if (mode == T_INTERP_LINEAR)
d_BindTextureToArray(d_input, a_input, t_input, dimsinput, hipFilterModeLinear, false);
else if (mode == T_INTERP_CUBIC)
{
tfloat* d_temp;
hipMalloc((void**)&d_temp, Elements2(dimsinput) * sizeof(tfloat));
hipMemcpy(d_temp, d_input, Elements2(dimsinput) * sizeof(tfloat), hipMemcpyDeviceToDevice);
d_CubicBSplinePrefilter2D(d_temp, dimsinput);
d_BindTextureToArray(d_temp, a_input, t_input, dimsinput, hipFilterModeLinear, false);
hipFree(d_temp);
}
int TpB = tmin(256, NextMultipleOf(n, 32));
int grid = tmin((n + TpB - 1) / TpB, 8192);
if (mode == T_INTERP_LINEAR)
RemapInterpolated2DKernel<false> << <grid, TpB >> > (t_input, d_output, d_addresses, n);
else if (mode == T_INTERP_CUBIC)
RemapInterpolated2DKernel<true> << <grid, TpB >> > (t_input, d_output, d_addresses, n);
hipDestroyTextureObject(t_input);
hipFreeArray(a_input);
}
template<bool cubicinterp> __global__ void RemapInterpolated2DKernel(cudaTex t_input, tfloat* d_output, float2* d_addresses, int n)
{
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += gridDim.x * blockDim.x)
{
float2 address = d_addresses[idx];
if (cubicinterp)
d_output[idx] = cubicTex2D(t_input, address.x, address.y);
else
d_output[idx] = tex2D<tfloat>(t_input, address.x, address.y);
}
}
///////////////////////////////////
//Sparse mask to dense conversion//
///////////////////////////////////
template <class T> void h_MaskSparseToDense(T* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal)
{
size_t lastaddress = 0;
size_t* h_tempforward = (size_t*)malloc(elementsoriginal * sizeof(size_t));
if (h_mapbackward != NULL)
for (size_t i = 0; i < elementsoriginal; i++)
if (h_input[i] > 0)
{
h_tempforward[lastaddress] = i;
h_mapbackward[i] = lastaddress;
lastaddress++;
}
else
h_mapbackward[i] = -1;
else
for (size_t i = 0; i < elementsoriginal; i++)
{
if (h_input[i] > 0)
{
h_tempforward[lastaddress] = i;
lastaddress++;
}
}
if (lastaddress == 0)
{
*h_mapforward = NULL;
elementsmapped = 0;
}
else
{
*h_mapforward = (size_t*)malloc(lastaddress * sizeof(size_t));
memcpy(*h_mapforward, h_tempforward, lastaddress * sizeof(size_t));
elementsmapped = lastaddress;
}
free(h_tempforward);
}
template void h_MaskSparseToDense<float>(float* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<double>(double* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<int>(int* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<bool>(bool* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<char>(char* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template <class T> void d_MaskSparseToDense(T* d_input, size_t** d_mapforward, size_t* d_mapbackward, size_t &elementsmapped, size_t elementsoriginal)
{
T* h_input = (T*)MallocFromDeviceArray(d_input, elementsoriginal * sizeof(T));
size_t* h_mapforward = NULL;
size_t* h_mapbackward = d_mapbackward == NULL ? NULL : (size_t*)malloc(elementsoriginal * sizeof(size_t));
size_t elements = 0;
h_MaskSparseToDense(h_input, &h_mapforward, h_mapbackward, elements, elementsoriginal);
*d_mapforward = h_mapforward == NULL ? NULL : (size_t*)CudaMallocFromHostArray(h_mapforward, elements * sizeof(size_t));
if (d_mapbackward != NULL && h_mapbackward != NULL)
hipMemcpy(d_mapbackward, h_mapbackward, elementsoriginal * sizeof(size_t), hipMemcpyHostToDevice);
elementsmapped = elements;
free(h_input);
if (h_mapbackward != NULL)
free(h_mapbackward);
if (h_mapforward != NULL)
free(h_mapforward);
}
template void d_MaskSparseToDense<float>(float* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<double>(double* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<int>(int* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<bool>(bool* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<char>(char* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
} | d9eb313937b825bd84c986fb585b1a66905ecc93.cu | #include "Prerequisites.cuh"
#include "CubicInterp.cuh"
#include "Helper.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template <class T> __global__ void RemapKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch);
template <class T> __global__ void RemapReverseKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsdestination, T defvalue, int batch);
template<bool cubicinterp> __global__ void RemapInterpolated2DKernel(cudaTex t_input, tfloat* d_output, float2* d_addresses, int n);
//////////////////
//Data remapping//
//////////////////
template <class T> void d_Remap(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch)
{
size_t TpB = 192;
size_t totalblocks = tmin((elementsmapped + TpB - 1) / TpB, (size_t)32768);
dim3 grid = dim3((uint)totalblocks);
RemapKernel<T> << <grid, (uint)TpB >> > (d_input, d_map, d_output, elementsmapped, elementsoriginal, defvalue, batch);
}
template void d_Remap<tfloat>(tfloat* d_input, size_t* d_map, tfloat* d_output, size_t elementsmapped, size_t elementsoriginal, tfloat defvalue, int batch);
template void d_Remap<tcomplex>(tcomplex* d_input, size_t* d_map, tcomplex* d_output, size_t elementsmapped, size_t elementsoriginal, tcomplex defvalue, int batch);
template void d_Remap<int>(int* d_input, size_t* d_map, int* d_output, size_t elementsmapped, size_t elementsoriginal, int defvalue, int batch);
template <class T> void d_RemapReverse(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsdestination, T defvalue, int batch)
{
size_t TpB = 192;
size_t totalblocks = tmin((elementsmapped + TpB - 1) / TpB, (size_t)32768);
dim3 grid = dim3((uint)totalblocks);
RemapReverseKernel<T> << <grid, (uint)TpB >> > (d_input, d_map, d_output, elementsmapped, elementsdestination, defvalue, batch);
}
template void d_RemapReverse<tfloat>(tfloat* d_input, size_t* d_map, tfloat* d_output, size_t elementsmapped, size_t elementsdestination, tfloat defvalue, int batch);
template void d_RemapReverse<int>(int* d_input, size_t* d_map, int* d_output, size_t elementsmapped, size_t elementsdestination, int defvalue, int batch);
// Host-memory convenience wrapper around d_Remap: copies the input volume and
// index map to the device, gathers h_output[i] = h_input[h_map[i]] per batch,
// copies the result back and frees all device allocations.
// No CUDA error checking is performed (consistent with the rest of this file).
template <class T> void h_Remap(T* h_input, size_t* h_map, T* h_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch)
{
T* d_input = (T*)CudaMallocFromHostArray(h_input, elementsoriginal * batch * sizeof(T));
size_t* d_map = (size_t*)CudaMallocFromHostArray(h_map, elementsmapped * sizeof(size_t));
T* d_output;
cudaMalloc((void**)&d_output, elementsmapped * batch * sizeof(T));
d_Remap(d_input, d_map, d_output, elementsmapped, elementsoriginal, defvalue, batch);
cudaMemcpy(h_output, d_output, elementsmapped * batch * sizeof(T), cudaMemcpyDeviceToHost);
cudaFree(d_input);
cudaFree(d_map);
cudaFree(d_output);
}
template void h_Remap<tfloat>(tfloat* d_input, size_t* d_map, tfloat* d_output, size_t elementsmapped, size_t elementsoriginal, tfloat defvalue, int batch);
template void h_Remap<int>(int* d_input, size_t* d_map, int* d_output, size_t elementsmapped, size_t elementsoriginal, int defvalue, int batch);
// Gather kernel: d_output[id] = d_input[d_map[id]] for every mapped element,
// repeated for each of the `batch` stacked volumes (volumes are laid out
// contiguously: input stride elementsoriginal, output stride elementsmapped).
// Grid-stride loop, so any launch configuration is correct.
// NOTE(review): `defvalue` is accepted but never used in this kernel — confirm
// whether it was meant as a fill value for some case.
template <class T> __global__ void RemapKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsoriginal, T defvalue, int batch)
{
size_t address;
for (size_t id = blockIdx.x * blockDim.x + threadIdx.x;
id < elementsmapped;
id += blockDim.x * gridDim.x)
{
address = d_map[id];
// b is size_t while batch is int: fine as long as batch >= 0.
for (size_t b = 0; b < batch; b++)
d_output[id + elementsmapped * b] = d_input[address + elementsoriginal * b];
}
}
// Scatter kernel (inverse of RemapKernel): d_output[d_map[id]] = d_input[id]
// for every mapped element, repeated per batch. Destination elements that are
// not targeted by any map entry are left untouched.
// NOTE(review): `defvalue` is never applied here either; if the destination is
// expected to be pre-filled with it, that must happen before this launch.
template <class T> __global__ void RemapReverseKernel(T* d_input, size_t* d_map, T* d_output, size_t elementsmapped, size_t elementsdestination, T defvalue, int batch)
{
size_t address;
for (size_t id = blockIdx.x * blockDim.x + threadIdx.x;
id < elementsmapped;
id += blockDim.x * gridDim.x)
{
address = d_map[id];
for (size_t b = 0; b < batch; b++)
d_output[address + elementsdestination * b] = d_input[id + elementsmapped * b];
}
}
/////////////////////
//Texture remapping//
/////////////////////
// Samples a 2D image at n arbitrary (possibly fractional) coordinates.
// The input is bound to a texture (linear filtering); for T_INTERP_CUBIC the
// image is first prefiltered into B-spline coefficients on a temporary copy.
// d_addresses holds the n sample positions; results go to d_output[0..n-1].
// NOTE(review): if `mode` is neither T_INTERP_LINEAR nor T_INTERP_CUBIC,
// t_input/a_input are destroyed uninitialized below — confirm callers only pass
// these two modes.
void d_RemapInterpolated2D(tfloat* d_input, int2 dimsinput, tfloat* d_output, float2* d_addresses, int n, T_INTERP_MODE mode)
{
cudaArray* a_input;
cudaTex t_input;
if (mode == T_INTERP_LINEAR)
d_BindTextureToArray(d_input, a_input, t_input, dimsinput, cudaFilterModeLinear, false);
else if (mode == T_INTERP_CUBIC)
{
tfloat* d_temp;
cudaMalloc((void**)&d_temp, Elements2(dimsinput) * sizeof(tfloat));
cudaMemcpy(d_temp, d_input, Elements2(dimsinput) * sizeof(tfloat), cudaMemcpyDeviceToDevice);
d_CubicBSplinePrefilter2D(d_temp, dimsinput);
// d_temp is freed right after binding; presumably d_BindTextureToArray
// copies the data into a_input — TODO confirm.
d_BindTextureToArray(d_temp, a_input, t_input, dimsinput, cudaFilterModeLinear, false);
cudaFree(d_temp);
}
int TpB = tmin(256, NextMultipleOf(n, 32));
int grid = tmin((n + TpB - 1) / TpB, 8192);
if (mode == T_INTERP_LINEAR)
RemapInterpolated2DKernel<false> << <grid, TpB >> > (t_input, d_output, d_addresses, n);
else if (mode == T_INTERP_CUBIC)
RemapInterpolated2DKernel<true> << <grid, TpB >> > (t_input, d_output, d_addresses, n);
cudaDestroyTextureObject(t_input);
cudaFreeArray(a_input);
}
// Texture-sampling kernel: for each of the n addresses, fetches the bound 2D
// texture either with cubic B-spline interpolation (cubicTex2D) or plain
// hardware linear filtering (tex2D). Grid-stride loop; the template parameter
// makes the branch compile-time constant, so there is no warp divergence.
template<bool cubicinterp> __global__ void RemapInterpolated2DKernel(cudaTex t_input, tfloat* d_output, float2* d_addresses, int n)
{
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += gridDim.x * blockDim.x)
{
float2 address = d_addresses[idx];
if (cubicinterp)
d_output[idx] = cubicTex2D(t_input, address.x, address.y);
else
d_output[idx] = tex2D<tfloat>(t_input, address.x, address.y);
}
}
///////////////////////////////////
//Sparse mask to dense conversion//
///////////////////////////////////
// Builds dense<->sparse index maps for a host-side mask.
//   h_input:        mask array; every element > 0 counts as selected.
//   h_mapforward:   receives a freshly malloc'ed array holding the original
//                   index of each selected element (NULL if nothing selected).
//   h_mapbackward:  optional (may be NULL); h_mapbackward[i] receives the dense
//                   index of original element i, or (size_t)-1 if not selected.
//   elementsmapped: receives the number of selected elements.
// The caller owns *h_mapforward and must free() it.
template <class T> void h_MaskSparseToDense(T* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal)
{
    // Scratch buffer sized for the worst case (everything selected).
    size_t* scratch = (size_t*)malloc(elementsoriginal * sizeof(size_t));
    size_t count = 0;

    if (h_mapbackward != NULL)
    {
        for (size_t i = 0; i < elementsoriginal; i++)
        {
            if (h_input[i] > 0)
            {
                scratch[count] = i;
                h_mapbackward[i] = count;
                count++;
            }
            else
            {
                h_mapbackward[i] = -1;  // sentinel: not part of the dense set
            }
        }
    }
    else
    {
        for (size_t i = 0; i < elementsoriginal; i++)
        {
            if (h_input[i] > 0)
                scratch[count++] = i;
        }
    }

    if (count == 0)
    {
        *h_mapforward = NULL;
        elementsmapped = 0;
    }
    else
    {
        // Shrink the scratch buffer to the exact number of hits.
        *h_mapforward = (size_t*)malloc(count * sizeof(size_t));
        memcpy(*h_mapforward, scratch, count * sizeof(size_t));
        elementsmapped = count;
    }
    free(scratch);
}
template void h_MaskSparseToDense<float>(float* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<double>(double* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<int>(int* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<bool>(bool* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void h_MaskSparseToDense<char>(char* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
// Device-memory wrapper around h_MaskSparseToDense: copies the mask to the
// host, builds the maps there, then uploads the results. *d_mapforward is a
// fresh device allocation owned by the caller (NULL when nothing is selected);
// d_mapbackward, if non-NULL, must already point to elementsoriginal size_t's.
template <class T> void d_MaskSparseToDense(T* d_input, size_t** d_mapforward, size_t* d_mapbackward, size_t &elementsmapped, size_t elementsoriginal)
{
T* h_input = (T*)MallocFromDeviceArray(d_input, elementsoriginal * sizeof(T));
size_t* h_mapforward = NULL;
// Only build the backward map if the caller asked for one.
size_t* h_mapbackward = d_mapbackward == NULL ? NULL : (size_t*)malloc(elementsoriginal * sizeof(size_t));
size_t elements = 0;
h_MaskSparseToDense(h_input, &h_mapforward, h_mapbackward, elements, elementsoriginal);
*d_mapforward = h_mapforward == NULL ? NULL : (size_t*)CudaMallocFromHostArray(h_mapforward, elements * sizeof(size_t));
if (d_mapbackward != NULL && h_mapbackward != NULL)
cudaMemcpy(d_mapbackward, h_mapbackward, elementsoriginal * sizeof(size_t), cudaMemcpyHostToDevice);
elementsmapped = elements;
free(h_input);
if (h_mapbackward != NULL)
free(h_mapbackward);
if (h_mapforward != NULL)
free(h_mapforward);
}
// NOTE(review): the parameter names in these instantiation declarations still
// say h_* although the definition takes device pointers; names in explicit
// instantiations are inert, but worth tidying at some point.
template void d_MaskSparseToDense<float>(float* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<double>(double* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<int>(int* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<bool>(bool* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
template void d_MaskSparseToDense<char>(char* h_input, size_t** h_mapforward, size_t* h_mapbackward, size_t &elementsmapped, size_t elementsoriginal);
} |
abe49ff752d8509c9d6f3135e3bf5419205c0219.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
* MATLAB MEX functions for TV image denoising. Check inputs and parses
* MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* imaluengo@gmail.com
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
// Launch limits for this translation unit.
#define MAXTREADS 1024
// Upper bound on the number of extra boundary slices buffered per split.
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
// MEX-flavored error check: on any pending HIP error, resets the device and
// raises a MATLAB error carrying `msg` plus the HIP error string.
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
hipDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",hipGetErrorString(__err));\
} \
} while (0)
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
    // Backward-difference divergence of the dual field (pz, py, px) at voxel
    // (z, y, x). At the low border of an axis there is no backward neighbor,
    // so the term reduces to the field value itself.
    const long slice = rows * cols;
    const long center = z * slice + y * cols + x;

    float acc = 0.0f;
    // z term, then y term, then x term (same accumulation order as before).
    acc += (z > 0) ? (pz[center] - pz[center - slice]) / dz : pz[center];
    acc += (y > 0) ? (py[center] - py[center - cols]) / dy : py[center];
    acc += (x > 0) ? (px[center] - px[center - 1]) / dx : px[center];
    return acc;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
    // Forward-difference gradient of u at (z, y, x), written into grad[0..2]
    // as (d/dz, d/dy, d/dx). Components at the high border of an axis are left
    // untouched, so the caller must pre-initialize grad.
    const long slice = rows * cols;
    const long center = z * slice + y * cols + x;
    const float ucenter = u[center];

    if (z + 1 < depth)
        grad[0] = (u[center + slice] - ucenter) / dz;
    if (y + 1 < rows)
        grad[1] = (u[center + cols] - ucenter) / dy;
    if (x + 1 < cols)
        grad[2] = (u[center + 1] - ucenter) / dx;
}
// Primal update step of the TV denoising iteration:
//   u <- (1 - tau) * u + tau * (f + div(p) / lambda)
// One thread per voxel; no grid-stride loop, so the launch grid must cover the
// full (cols, rows, depth) extent — out-of-range threads simply return.
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float _div = divergence(pz, py, px, z, y, x, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * _div);
}
// Dual update step of the TV denoising iteration: gradient ascent on p followed
// by pointwise reprojection onto the unit ball, q / max(1, |q|).
// Same one-thread-per-voxel launch contract as update_u.
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
// grad must start zeroed: gradient() leaves border components untouched.
float grad[3] = {0,0,0}, q[3];
gradient(u, grad, z, y, x, depth, rows, cols, dz, dy, dx);
q[0] = pz[idx] + tau * grad[0];
q[1] = py[idx] + tau * grad[1];
q[2] = px[idx] + tau * grad[2];
float norm = fmaxf(1.0f, sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2]));
pz[idx] = q[0] / norm;
py[idx] = q[1] / norm;
px[idx] = q[2] / norm;
}
// Main function
// Multi-GPU TV (total-variation) denoising driver built on update_u/update_p.
// Splits the volume along image_size[2] across all visible GPUs; when the five
// required volumes (f, u, px, py, pz) do not fit in GPU memory, additionally
// splits in chunks and stages u/px/py/pz through host memory between chunks.
//   src/dst:    host input / output volumes (image_size[0]*[1]*[2] floats)
//   lambda:     TV regularization weight (kernels use 1/lambda)
//   spacing:    voxel spacing, (x, y, z) order
//   image_size: volume dimensions; splitting happens along image_size[2]
//   maxIter:    number of primal-dual iterations
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning trhown)
int dev;
const int devicenamelength = 256; // The length 256 is fixed by spec of hipDeviceProp_t::name
char devicename[devicenamelength];
hipDeviceProp_t deviceProp;
// Warn (once) if the machine mixes different GPU models; the split logic
// assumes identical memory limits on every device.
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
// We don't know if the devices are being used. lets check that. and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// %5 of free memory should be enough, we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
const size_t pixels_per_slice = image_size[0] * image_size[1] ;
const size_t mem_slice_image = sizeof(float)* pixels_per_slice ;
const size_t mem_size_image = sizeof(float)* total_pixels;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
// Factor 5 = the five volumes kept per device (src, u, px, py, pz); each
// device chunk also carries buffer_length boundary slices on each side.
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image is around 28Mbytes), lets for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each splits should have 2 slices more, to accoutn for overlap of images.
// lets make sure these 2 slices fit, if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it measn we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
// one more split should do the job, as its an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here??!?!
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: ander.biguri@gmail.com\n");
}
}
// Lets try to make the host memory pinned:
// We laredy queried the GPU and assuemd they are the same, thus should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
// NOTE(review): `&` binds looser than `>`, so this parses as
// isHostRegisterSupported & (splits>1) — correct only because the attribute
// is 0/1. `&&` was presumably intended; same pattern at the unregister below.
if (isHostRegisterSupported & splits>1){
hipHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Lets allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
hipHostMalloc((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
// With splits, dst doubles as the host staging area for u.
h_u=dst;
}else{
// Single split: only small per-boundary exchange buffers are needed.
hipHostMalloc((void**)&buffer_u, pixels_per_slice*sizeof(float));
hipHostMalloc((void**)&buffer_px, pixels_per_slice*sizeof(float));
hipHostMalloc((void**)&buffer_py, pixels_per_slice*sizeof(float));
hipHostMalloc((void**)&buffer_pz, pixels_per_slice*sizeof(float));
}
// We should be good to go memory wise.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
hipSetDevice(dev);
// F
hipMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
hipMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
hipMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
hipMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
hipMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
hipDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
// 5 streams per device: one for compute/housekeeping plus one per staged
// array (u, px, py, pz) so the host<->device copies can overlap.
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*pixels_per_slice;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
// Outer loop advances buffer_length iterations at a time; note i is unsigned
// while maxIter is int (assumed non-negative).
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute all the image. The ordering of these loops
// need to be like this due to the bounding layers between splits. If more than 1 split is needed
// for each GPU then there is no other way that taking the entire memory out of GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
// Precompute indices and needed bytes
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*pixels_per_slice;
linear_idx_start=pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
// Check if its the first or last chunck
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// lets compute where we start copyes and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
// First pass: seed f and u from src, zero the dual fields.
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
// All these are async
hipMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
hipMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
// we need all the stream to finish
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
// if we need to split and its not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+3]);
hipMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+4]);
hipMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
// NOTE(review): this loop does not call hipSetDevice(dev) like the ones
// above; it relies on the stream identifying the target device — confirm.
for (dev = 0; dev < deviceCount; dev++){
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
// Inter interations.
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
// Step sizes follow the iteration schedule of the primal-dual method.
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_u), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_p), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
// Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(splits==1){
// Exchange the buffer_length boundary slices between neighboring devices
// (through pinned host buffers) so the next outer pass sees fresh halos.
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
hipSetDevice(dev+1);
hipMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
hipDeviceSynchronize();
if (dev>0){
// U
hipSetDevice(dev-1);
hipMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
hipMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
if ((i+buffer_length)<maxIter){ // If its the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
hipMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+2]);
hipMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+3]);
hipMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
// In the single-split case the result still lives on the devices; copy each
// device's interior slices (skipping the leading halo) into dst.
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices = ((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels = curr_slices*pixels_per_slice;
hipMemcpyAsync(dst+slices_per_split*pixels_per_slice*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
hipFree(d_src[dev]);
hipFree(d_u [dev]);
hipFree(d_pz[dev]);
hipFree(d_py[dev]);
hipFree(d_px[dev]);
}
// Free whichever host staging buffers were allocated above (guards mirror the
// allocation conditions exactly).
if(splits>1 && buffer_length<maxIter){
hipHostFree(h_px);
hipHostFree(h_py);
hipHostFree(h_pz);
}else if(splits==1){
hipHostFree(buffer_u);
hipHostFree(buffer_px);
hipHostFree(buffer_py);
hipHostFree(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
// NOTE(review): same `&` precedence pattern as the register call above.
if (isHostRegisterSupported & splits>1){
hipHostUnregister(src);
hipHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
// Queries free memory on every GPU and returns, in *mem_GPU_global, 95% of the
// smallest free amount across all devices. Raises a MATLAB error if any device
// has less than half of its total memory free (assumed occupied by another
// program).
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
// Track the minimum across devices so the split logic stays safe everywhere.
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
| abe49ff752d8509c9d6f3135e3bf5419205c0219.cu | /*-------------------------------------------------------------------------
*
* MATLAB MEX functions for TV image denoising. Check inputs and parses
* MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* imaluengo@gmail.com
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#define MAXTREADS 1024
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
cudaDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",cudaGetErrorString(__err));\
} \
} while (0)
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
// Backward-difference divergence of the dual vector field (pz,py,px) at
// voxel (z,y,x). On the low boundary of an axis there is no previous
// sample, so the raw field value itself is accumulated for that term
// (matching the adjoint of the forward-difference gradient below).
const long slice = rows * cols;
const long idx = z * slice + y * cols + x;
float acc;
acc  = (z > 0) ? (pz[idx] - pz[idx - slice]) / dz : pz[idx];
acc += (y > 0) ? (py[idx] - py[idx - cols ]) / dy : py[idx];
acc += (x > 0) ? (px[idx] - px[idx - 1    ]) / dx : px[idx];
return acc;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
// Forward-difference gradient of u at voxel (z,y,x), written into
// grad[0..2] = (du/dz, du/dy, du/dx). Components on the high boundary of
// an axis are left untouched; callers are expected to pass a
// zero-initialised grad array.
const long slice = rows * cols;
const long idx = z * slice + y * cols + x;
const float here = u[idx];
if (z + 1 < depth) grad[0] = (u[idx + slice] - here) / dz;
if (y + 1 < rows ) grad[1] = (u[idx + cols ] - here) / dy;
if (x + 1 < cols ) grad[2] = (u[idx + 1    ] - here) / dx;
}
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
// Primal step of the TV (ROF) scheme: relax u towards the data term f
// plus (1/lambda) times the divergence of the dual field (pz,py,px).
// Expects a 3D launch covering cols x rows x depth threads.
const long vx = threadIdx.x + blockIdx.x * blockDim.x;
const long vy = threadIdx.y + blockIdx.y * blockDim.y;
const long vz = threadIdx.z + blockIdx.z * blockDim.z;
if (vx >= cols || vy >= rows || vz >= depth)
return;
const long idx = (vz * rows + vy) * cols + vx;
const float div_p = divergence(pz, py, px, vz, vy, vx, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * div_p);
}
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
// Dual step of the TV (ROF) scheme: ascend along the gradient of u, then
// project each per-voxel dual vector back onto the unit ball (pointwise
// L2 norm clamped to at least 1). Expects a 3D launch covering
// cols x rows x depth threads.
const long vx = threadIdx.x + blockIdx.x * blockDim.x;
const long vy = threadIdx.y + blockIdx.y * blockDim.y;
const long vz = threadIdx.z + blockIdx.z * blockDim.z;
if (vx >= cols || vy >= rows || vz >= depth)
return;
const long idx = vz * rows * cols + vy * cols + vx;
float grad[3] = {0,0,0};
gradient(u, grad, vz, vy, vx, depth, rows, cols, dz, dy, dx);
const float qz = pz[idx] + tau * grad[0];
const float qy = py[idx] + tau * grad[1];
const float qx = px[idx] + tau * grad[2];
const float norm = fmaxf(1.0f, sqrtf(qz * qz + qy * qy + qx * qx));
pz[idx] = qz / norm;
py[idx] = qy / norm;
px[idx] = qx / norm;
}
// Main function
/* 3D total-variation (ROF) denoising of src into dst, distributed across all
 * visible CUDA devices. The volume is cut into z-slabs (one slab per GPU,
 * possibly several "splits" per GPU when memory is short); every slab carries
 * buffer_length extra halo slices on each side so neighbouring slabs can be
 * kept consistent between bursts of iterations.
 *
 * src        : input volume, image_size[0]*image_size[1]*image_size[2] floats.
 * dst        : output volume, same size as src.
 * lambda     : data-fidelity weight of the ROF model.
 * spacing    : voxel spacing, passed to the kernels as (dz,dy,dx).
 * image_size : volume dimensions {x (cols), y (rows), z (depth)}.
 * maxIter    : total number of primal/dual iterations to run.
 */
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
const int devicenamelength = 256;  // The length 256 is fixed by spec of cudaDeviceProp::name
char devicename[devicenamelength];
cudaDeviceProp deviceProp;
// Warn (once) if the machine mixes different GPU models; the split logic
// below assumes homogeneous devices.
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
// We don't know if the devices are being used. lets check that. and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// %5 of free memory should be enough, we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
const size_t pixels_per_slice = image_size[0] * image_size[1] ;
const size_t mem_slice_image = sizeof(float)* pixels_per_slice ;
const size_t mem_size_image = sizeof(float)* total_pixels;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
// Five device arrays of image size are needed (src, u, px, py, pz), hence
// the factor 5 throughout the sizing arithmetic below.
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image is around 28Mbytes), lets for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each splits should have 2 slices more, to account for overlap of images.
// lets make sure these 2 slices fit, if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
// one more split should do the job, as its an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here??!?!
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: ander.biguri@gmail.com\n");
}
}
// Lets try to make the host memory pinned:
// We already queried the GPU and assumed they are the same, thus should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
// NOTE(review): '&' here is bitwise; it happens to work because '>' binds
// tighter than '&' and both operands are 0/1, but '&&' was likely intended.
if (isHostRegisterSupported & splits>1){
cudaHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Lets allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
cudaMallocHost((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
h_u=dst;
}else{
// Single-split case: small pinned staging buffers (one slab halo each)
// used to shuttle boundary slices between neighbouring GPUs.
cudaMallocHost((void**)&buffer_u, pixels_per_slice*sizeof(float));
cudaMallocHost((void**)&buffer_px, pixels_per_slice*sizeof(float));
cudaMallocHost((void**)&buffer_py, pixels_per_slice*sizeof(float));
cudaMallocHost((void**)&buffer_pz, pixels_per_slice*sizeof(float));
}
// We should be good to go memory wise.
// NOTE(review): these five host pointer arrays (and 'stream',
// 'offset_device', 'offset_host', 'bytes_device' below) are malloc'd but
// never free'd -- a small host-memory leak per call.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
cudaSetDevice(dev);
// F
cudaMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
cudaMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
cudaMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
cudaMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
cudaMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
cudaDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*pixels_per_slice;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
// Outer loop advances in bursts of buffer_length iterations; halos only
// need refreshing between bursts.
// NOTE(review): 'i < maxIter' compares unsigned with signed int; fine for
// the expected maxIter >= 0, but a negative maxIter would loop huge.
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute all the image. The ordering of these loops
// need to be like this due to the bounding layers between splits. If more than 1 split is needed
// for each GPU then there is no other way that taking the entire memory out of GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
// Precompute indices and needed bytes
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*pixels_per_slice;
linear_idx_start=pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
// Check if its the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// lets compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
// First burst: upload src, seed u with src, zero the dual fields.
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
// All these are async
cudaMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
cudaMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
// we need all the stream to finish
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
// if we need to split and its not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+3]);
cudaMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
// Internal iterations (one burst of up to buffer_length steps).
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
// Step sizes follow the original TIGRE schedule: tau2 grows linearly
// with the global iteration count, tau1 shrinks accordingly.
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_u<<<grid, block,0,stream[dev*nStream_device]>>>(d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_p<<<grid, block,0,stream[dev*nStream_device]>>>(d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
// Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(splits==1){
// Halo exchange between neighbouring GPUs through the small pinned
// staging buffers (device -> host buffer -> neighbouring device).
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
cudaSetDevice(dev+1);
cudaMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
cudaDeviceSynchronize();
if (dev>0){
// U
cudaSetDevice(dev-1);
cudaMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
cudaMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
if ((i+buffer_length)<maxIter){ // If its the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
cudaMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+2]);
cudaMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+3]);
cudaMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
if(splits==1){
// Single split: result still lives on the devices; copy it into dst
// (skipping the leading halo of each slab).
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices = ((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels = curr_slices*pixels_per_slice;
cudaMemcpyAsync(dst+slices_per_split*pixels_per_slice*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
cudaFree(d_src[dev]);
cudaFree(d_u [dev]);
cudaFree(d_pz[dev]);
cudaFree(d_py[dev]);
cudaFree(d_px[dev]);
}
if(splits>1 && buffer_length<maxIter){
cudaFreeHost(h_px);
cudaFreeHost(h_py);
cudaFreeHost(h_pz);
}else if(splits==1){
cudaFreeHost(buffer_u);
cudaFreeHost(buffer_px);
cudaFreeHost(buffer_py);
cudaFreeHost(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
// NOTE(review): same '&' vs '&&' precedence point as at registration time;
// the two conditions must match so registration and unregistration pair up.
if (isHostRegisterSupported & splits>1){
cudaHostUnregister(src);
cudaHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
// Reports, through mem_GPU_global, 95% of the smallest free-memory figure
// found across all devices. Errors out (mexErrMsgIdAndTxt) when any device
// has less than half of its total memory free, i.e. is already busy.
size_t smallest = 0;
for (int dev = 0; dev < deviceCount; dev++){
size_t memfree, memtotal;
cudaSetDevice(dev);
cudaMemGetInfo(&memfree, &memtotal);
if (memfree < memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
if (dev == 0 || memfree < smallest)
smallest = memfree;
}
// Keep a 5% safety margin off the reported minimum.
*mem_GPU_global = (size_t)((double)smallest*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
d8d3821a448d98d7c078b6ba2cc4e8b81a3c35df.hip | // !!! This is a file automatically generated by hipify!!!
/* Blocking: Figure 2: https://dl.acm.org/doi/pdf/10.1145/3016078.2851152/*/
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "hipsparse.h"
#define FTYPE float
#define CLEANUP(s) \
do { \
printf ("%s\n", s); \
if (yHostPtr) free(yHostPtr); \
if (zHostPtr) free(zHostPtr); \
if (xIndHostPtr) free(xIndHostPtr); \
if (xValHostPtr) free(xValHostPtr); \
if (cooRowIndexHostPtr) free(cooRowIndexHostPtr);\
if (cooColIndexHostPtr) free(cooColIndexHostPtr);\
if (cooValHostPtr) free(cooValHostPtr); \
if (y) hipFree(y); \
if (z) hipFree(z); \
if (xInd) hipFree(xInd); \
if (xVal) hipFree(xVal); \
if (csrRowPtr) hipFree(csrRowPtr); \
if (cooRowIndex) hipFree(cooRowIndex); \
if (cooColIndex) hipFree(cooColIndex); \
if (cooVal) hipFree(cooVal); \
if (handle) hipsparseDestroy(handle); \
fflush (stdout); \
} while (0)
// A single sparse-matrix entry in coordinate (COO) form, as parsed from the
// MatrixMarket input file. Entries are later sorted with compare1
// (row-major, ties broken by column).
struct v_struct {
int row, col; // zero-based row and column index
FTYPE val; // entry value (FTYPE == float here)
};
// Wall-clock time in seconds with microsecond resolution, via gettimeofday.
// Prints a warning if the call fails (the returned value is then whatever
// the uninitialised timeval holds).
double rtclock(void)
{
struct timeval now;
struct timezone tz;
int rc = gettimeofday(&now, &tz);
if (rc != 0) printf("Error return from gettimeofday: %d", rc);
return now.tv_sec + now.tv_usec * 1.0e-6;
}
/* qsort comparator for struct v_struct: orders entries by row, breaking ties
 * by column (row-major order, as required to build CSR row pointers).
 * Uses direct comparisons instead of the original subtraction idiom so that
 * entries with indices of large opposite signs cannot overflow int and
 * produce a wrong-signed result. Returns <0, 0 or >0 per qsort's contract. */
int compare1(const void *a, const void *b)
{
const struct v_struct *lhs = (const struct v_struct *)a;
const struct v_struct *rhs = (const struct v_struct *)b;
if (lhs->row != rhs->row) return (lhs->row > rhs->row) ? 1 : -1;
if (lhs->col != rhs->col) return (lhs->col > rhs->col) ? 1 : -1;
return 0;
}
/* 1.5D blocked SpMM benchmark (HIP/hipSPARSE).
 * Reads a MatrixMarket matrix (argv[1]), partitions it into a rootp x rootp
 * grid of blocks where rootp = floor(sqrt(nBlocks)) (argv[3]), converts each
 * block to CSR, and times hipsparseSpMM of every non-empty block against a
 * dense all-ones right-hand side with sc columns (argv[2]). */
int main(int argc, char **argv)
{
if(argc < 4){
printf("Wrong arg list. Try with: ./exec matrix rhs nBlocks.\n");
printf("E.g., ./spmm_blocked tmp.mtx 32 1 \n");
exit(0);
}
FILE *fp;
// NOTE(review): fpo is opened for append but never written to before fclose.
FILE *fpo = fopen("SpMM_GPU_SP_spmm.out", "a");
srand(time(NULL));
// NOTE(review): cudaStat1..6, n, nnz_vector, j, dummy and tmp_ne are unused.
hipError_t cudaStat1,cudaStat2,cudaStat3,cudaStat4,cudaStat5,cudaStat6;
hipsparseStatus_t status;
hipsparseHandle_t handle=0;
hipsparseMatDescr_t descra=0;
int n, nr, nc, nnz, nflag, nnz_vector, i, j;
struct v_struct *temp_v;
char buf[300];
int sflag;
int dummy, pre_count=0, tmp_ne;
int sc = atoi(argv[2]);
int nBlock = atoi(argv[3]);
// First pass over the file: classify the matrix from the MatrixMarket
// banner and count header/comment lines so they can be skipped later.
fp = fopen(argv[1], "r");
fgets(buf, 300, fp);
if(strstr(buf, "symmetric") != NULL || strstr(buf, "Hermitian") != NULL) sflag = 1; // symmetric
else sflag = 0;
if(strstr(buf, "pattern") != NULL) nflag = 0; // non-value
else if(strstr(buf, "complex") != NULL) nflag = -1;
else nflag = 1;
while(1) {
pre_count++;
fgets(buf, 300, fp);
if(strstr(buf, "%") == NULL) break;
}
fclose(fp);
// Second pass: skip the counted header lines and read the size line.
fp = fopen(argv[1], "r");
for(i=0;i<pre_count;i++)
fgets(buf, 300, fp);
fscanf(fp, "%d %d %d", &nr, &nc, &nnz);
// Symmetric matrices store only one triangle; double nnz to mirror entries.
nnz *= (sflag+1);
temp_v = (struct v_struct *)malloc(sizeof(struct v_struct)*(nnz+1));
/*------------------------
Read input matrix
------------------------*/
for(i=0;i<nnz;i++) {
fscanf(fp, "%d %d", &temp_v[i].row, &temp_v[i].col);
temp_v[i].row--; temp_v[i].col--;
if(temp_v[i].row < 0 || temp_v[i].row >= nr || temp_v[i].col < 0 || temp_v[i].col >= nc) {
fprintf(stdout, "A vertex id is out of range %d %d\n", temp_v[i].row, temp_v[i].col);
exit(0);
}
// pattern matrices get random values; complex matrices keep only Re part.
if(nflag == 0) temp_v[i].val = (FTYPE)(rand()%1048576)/1048576;
else if(nflag == 1) {
FTYPE ftemp;
fscanf(fp, " %f ", &ftemp);
temp_v[i].val = ftemp;
} else { // complex
FTYPE ftemp1, ftemp2;
fscanf(fp, " %f %f ", &ftemp1, &ftemp2);
temp_v[i].val = ftemp1;
}
if(sflag == 1) {
// Mirror the entry across the diagonal (consumes one of the doubled slots).
i++;
temp_v[i].row = temp_v[i-1].col;
temp_v[i].col = temp_v[i-1].row;
temp_v[i].val = temp_v[i-1].val;
}
}
qsort(temp_v, nnz, sizeof(struct v_struct), compare1);
/*------------------------
Estimate block nnz
------------------------*/
// NOTE(review): nBlock is assumed to be a perfect square; sqrt() truncation
// for non-squares would make the grid smaller than requested.
int p = nBlock;
int rootp = sqrt(nBlock);
int nRowsBlock = (nr + rootp - 1) / rootp;
int nColsBlock = (nc + rootp - 1) / rootp;
// sc = (sc + rootp -1)/rootp;
nr = nRowsBlock;
nc = nColsBlock;
int *nnzBlock = (int*)malloc(nBlock * sizeof(int));
memset(nnzBlock, 0, nBlock * sizeof(int));
int bi, bj, bid;
for(i = 0; i < nnz; i++){
bi = temp_v[i].row/nRowsBlock;
bj = temp_v[i].col/nColsBlock;
bid = bi * rootp + bj;
nnzBlock[bid]++;
}
int tot_nnz = 0;
/*------------------------
Created blocked COO/CSR
------------------------*/
int **b_rowPtr = (int **)malloc(nBlock * sizeof (int *) );
int **b_rowInd = (int **)malloc(nBlock * sizeof (int *) );
int **b_colInd = (int **)malloc(nBlock * sizeof (int *) );
FTYPE **b_val = (FTYPE **)malloc(nBlock * sizeof (FTYPE *) );
for (int b = 0; b < nBlock; b++) {
// NOTE(review): a CSR row-pointer array for nRowsBlock rows needs
// nRowsBlock+1 entries; only nRowsBlock are allocated here, yet index
// [1+local_rowInd] below can reach nRowsBlock and the device upload
// copies (nr+1) ints -- both out of bounds by one element.
b_rowPtr[b] = (int *) malloc ((nRowsBlock) * sizeof (int)) ;
memset(&b_rowPtr[b][0], 0, nRowsBlock * sizeof(int));
b_rowInd[b] = (int *) malloc ((nnzBlock[b]) * sizeof (int)) ;
b_colInd[b] = (int *) malloc ((nnzBlock[b]) * sizeof (int)) ;
b_val[b] = (FTYPE *) malloc ((nnzBlock[b]) * sizeof (FTYPE )) ;
}
memset( nnzBlock, 0, (nBlock) * sizeof(int) );
for(i = 0; i < nnz; i++){
int br = temp_v[i].row/nRowsBlock;
int bc = temp_v[i].col/nColsBlock;
int bId = br * rootp + bc;
int local_rowInd = temp_v[i].row % nRowsBlock;
b_rowPtr[bId][1+local_rowInd] = nnzBlock[bId]+1;
b_rowInd[bId][nnzBlock[bId]] = temp_v[i].row % nRowsBlock;
b_colInd[bId][nnzBlock[bId]] = temp_v[i].col % nColsBlock;
b_val[bId][nnzBlock[bId]] = temp_v[i].val;
nnzBlock[bId]++;
}
// Fill empty CSR rows with the previous row pointer.
// NOTE(review): at r==0 this reads b_rowPtr[b][-1] (out of bounds) whenever
// the first slot is still zero.
for (int b = 0; b < nBlock; ++b)
{
for(int r = 0; r < nRowsBlock; r++)
{
if(b_rowPtr[b][r] == 0)
b_rowPtr[b][r] = b_rowPtr[b][r-1];
}
}
/* 1.5D Blocked SpMM */
float tot_time = 0;
// loop through blocks of each benchmark
for (int br = 0; br < rootp; ++br) {//loop over row blocks of A
for (int bk = 0; bk < rootp; ++bk) {// loop over col blocks of A
int b = br * rootp + bk;
if(!nnzBlock[b]) continue;
nnz = nnzBlock[b];
nr = nRowsBlock;
nc = nColsBlock;
// Upload this block's CSR arrays to the device.
int *ccsr_v, *ccsr_e; FTYPE *ccsr_ev;
hipMalloc((void **) &ccsr_v, sizeof(int)*(nr+1));
hipMalloc((void **) &ccsr_e, sizeof(int)*nnz);
hipMalloc((void **) &ccsr_ev, sizeof(FTYPE)*nnz);
hipMemcpy(ccsr_v, &(b_rowPtr[b][0]), sizeof(int)*(nr+1), hipMemcpyHostToDevice);
hipMemcpy(ccsr_e, &(b_colInd[b][0]), sizeof(int)*(nnz), hipMemcpyHostToDevice);
hipMemcpy(ccsr_ev, &(b_val[b][0]), sizeof(FTYPE)*(nnz), hipMemcpyHostToDevice);
/* initialize cusparse library */
status= hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
return EXIT_FAILURE;
}
/* create and setup matrix descriptor */
status= hipsparseCreateMatDescr(&descra);
if (status != HIPSPARSE_STATUS_SUCCESS) {
return EXIT_FAILURE;
}
hipsparseSetMatType(descra,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descra,HIPSPARSE_INDEX_BASE_ZERO);
hipError_t err = hipSuccess;
// Dense input (all ones) and zeroed output for this block.
FTYPE *y_in, *cy_in, *y_out, *cy_out;
y_in = (FTYPE *)malloc(sizeof(FTYPE)*nc*sc);
y_out = (FTYPE *)malloc(sizeof(FTYPE)*(nr)*sc);
for(int i=0;i<nc*sc;i++)
y_in[i] = ((FTYPE)1);//(rand()%1048576))/1048576;
err = hipMalloc((void **) &cy_in, sizeof(FTYPE)*nc*sc);
if(err != hipSuccess) {fprintf(stdout, "\n"); exit(0); }
err = hipMalloc((void **) &cy_out, sizeof(FTYPE)*(nr)*sc);
if(err != hipSuccess) {fprintf(stdout, "\n"); exit(0); }
hipMemcpy(cy_in, y_in, sizeof(FTYPE)*nc*sc, hipMemcpyHostToDevice);
hipMemset((void *)cy_out, 0, sc*(nr)*sizeof(FTYPE));
float tot_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
const FTYPE alpha=1.0f, beta=0.0f;
/*new SpMM*/
hipsparseSpMatDescr_t a_cusparse;
status = hipsparseCreateCsr(&a_cusparse, nr, nc, nnz,
ccsr_v, ccsr_e, ccsr_ev,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_32F);
hipsparseDnMatDescr_t b_cusparse;
status = hipsparseCreateDnMat(&b_cusparse, nc, sc, sc,
cy_in, HIP_R_32F, HIPSPARSE_ORDER_ROW);
hipsparseDnMatDescr_t c_cusparse;
status = hipsparseCreateDnMat(&c_cusparse, nr, sc, sc,
cy_out, HIP_R_32F, HIPSPARSE_ORDER_ROW);
size_t bufferSize = 0;
status = hipsparseSpMM_bufferSize(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha,
a_cusparse,
b_cusparse,
&beta,
c_cusparse,
HIP_R_32F,
CUSPARSE_SPMM_CSR_ALG2,
&bufferSize);
if (status != HIPSPARSE_STATUS_SUCCESS) return EXIT_FAILURE;
char* externalBuffer = NULL;
hipMalloc(&externalBuffer, bufferSize);
// Time ITER SpMM launches between the two events.
hipDeviceSynchronize();
hipEventRecord(event1,0);
#define ITER (1)
for(int ik=0;ik<ITER;ik++) {
status = hipsparseSpMM(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha,
a_cusparse,
b_cusparse,
&beta,
c_cusparse,
HIP_R_32F,
CUSPARSE_SPMM_CSR_ALG2,
externalBuffer);
}
hipEventRecord(event2,0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&tot_ms, event1, event2);
hipDeviceSynchronize();
if (status != HIPSPARSE_STATUS_SUCCESS) return EXIT_FAILURE;
hipMemcpy(y_out, cy_out, sizeof(FTYPE)*(nr)*sc, hipMemcpyDeviceToHost);
hipFree(cy_out); hipFree(cy_in); free(y_out); free(y_in);
hipFree(externalBuffer);
hipFree(ccsr_v), hipFree(ccsr_e); hipFree(ccsr_ev);
// free(csr_v), free(csr_colIdx); free(csr_vals);
tot_time += tot_ms;
fprintf(stdout, "Block: %d, nnz: %d, tot_ms: %f s\n", b, nnzBlock[b], tot_ms/ITER);
}
}
// NOTE(review): this frees only rootp of the nBlock per-block arrays
// (should loop b < nBlock), and b_rowInd is never freed at all.
for (int b = 0; b < rootp; ++b){
free(b_rowPtr[b]); free(b_colInd[b]); free(b_val[b]);
}
// NOTE(review): 'nnz' here holds the nnz of the last processed block, not
// the whole matrix, so the reported GFLOPS figure is per-last-block.
fprintf(stdout, "1.5D Blocking, K=%d : nBlocks: %d, nnz: %d, tot_ms: %f ms, GFLOPS: %f \n", sc, nBlock, nnz, tot_time, (double)ITER*(double)nnz*2*sc/tot_time/1000000);
fclose(fpo);
}
| d8d3821a448d98d7c078b6ba2cc4e8b81a3c35df.cu | /* Blocking: Figure 2: https://dl.acm.org/doi/pdf/10.1145/3016078.2851152/*/
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include "cusparse.h"
#define FTYPE float
// Error-exit helper carried over from the cuSPARSE sample code: prints a
// message and releases a fixed set of host/device buffers and the handle.
// NOTE(review): none of the names it frees (yHostPtr, xInd, cooRowIndex, ...)
// are declared in the code visible in this file, and the macro is never
// invoked here -- it appears to be leftover, unused boilerplate.
#define CLEANUP(s) \
do { \
 printf ("%s\n", s); \
 if (yHostPtr) free(yHostPtr); \
 if (zHostPtr) free(zHostPtr); \
 if (xIndHostPtr) free(xIndHostPtr); \
 if (xValHostPtr) free(xValHostPtr); \
 if (cooRowIndexHostPtr) free(cooRowIndexHostPtr);\
 if (cooColIndexHostPtr) free(cooColIndexHostPtr);\
 if (cooValHostPtr) free(cooValHostPtr); \
 if (y) cudaFree(y); \
 if (z) cudaFree(z); \
 if (xInd) cudaFree(xInd); \
 if (xVal) cudaFree(xVal); \
 if (csrRowPtr) cudaFree(csrRowPtr); \
 if (cooRowIndex) cudaFree(cooRowIndex); \
 if (cooColIndex) cudaFree(cooColIndex); \
 if (cooVal) cudaFree(cooVal); \
 if (handle) cusparseDestroy(handle); \
 fflush (stdout); \
} while (0)
// One matrix nonzero in COO form: 0-based (row, col) index and its value
// (FTYPE is float in this file).
struct v_struct {
int row, col;
FTYPE val;
};
// Wall-clock timestamp in seconds (microsecond resolution) from
// gettimeofday(). On failure the error code is reported on stdout and the
// (then unspecified) timeval contents are still converted and returned.
double rtclock(void)
{
struct timeval tv;
struct timezone tz;
int rc = gettimeofday(&tv, &tz);
if (rc != 0) printf("Error return from gettimeofday: %d", rc);
return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
// qsort comparator for struct v_struct: orders nonzeros by row ascending,
// then by column ascending (i.e. CSR order).
// Fix: the original returned `a->col - b->col`, which overflows int when the
// columns have opposite signs or are far apart; use explicit three-way
// comparisons so the result is always -1/0/1 and cannot overflow.
int compare1(const void *a, const void *b)
{
const struct v_struct *x = (const struct v_struct *)a;
const struct v_struct *y = (const struct v_struct *)b;
if (x->row != y->row) return (x->row > y->row) - (x->row < y->row);
return (x->col > y->col) - (x->col < y->col);
}
int main(int argc, char **argv)
{
// Usage: ./exec <matrix.mtx> <rhs-cols sc> <nBlocks>
// Reads a MatrixMarket matrix, partitions it into a rootp x rootp grid of
// CSR blocks (rootp = sqrt(nBlocks)), and benchmarks cusparseSpMM on each
// nonempty block against a dense sc-column RHS of all ones.
if(argc < 4){
printf("Wrong arg list. Try with: ./exec matrix rhs nBlocks.\n");
printf("E.g., ./spmm_blocked tmp.mtx 32 1 \n");
exit(0);
}
FILE *fp;
FILE *fpo = fopen("SpMM_GPU_SP_spmm.out", "a");
srand(time(NULL));
cudaError_t cudaStat1,cudaStat2,cudaStat3,cudaStat4,cudaStat5,cudaStat6;
cusparseStatus_t status;
cusparseHandle_t handle=0;
cusparseMatDescr_t descra=0;
int n, nr, nc, nnz, nflag, nnz_vector, i, j;
struct v_struct *temp_v;
char buf[300];
int sflag;
int dummy, pre_count=0, tmp_ne;
int sc = atoi(argv[2]);
int nBlock = atoi(argv[3]);
// First pass over the header: detect symmetry/value kind and count the
// comment lines so the second pass can skip straight to the size line.
fp = fopen(argv[1], "r");
fgets(buf, 300, fp);
if(strstr(buf, "symmetric") != NULL || strstr(buf, "Hermitian") != NULL) sflag = 1; // symmetric
else sflag = 0;
if(strstr(buf, "pattern") != NULL) nflag = 0; // non-value
else if(strstr(buf, "complex") != NULL) nflag = -1;
else nflag = 1;
while(1) {
pre_count++;
fgets(buf, 300, fp);
if(strstr(buf, "%") == NULL) break;
}
fclose(fp);
fp = fopen(argv[1], "r");
for(i=0;i<pre_count;i++)
fgets(buf, 300, fp);
fscanf(fp, "%d %d %d", &nr, &nc, &nnz);
// Symmetric inputs store only one triangle: double nnz and mirror below.
nnz *= (sflag+1);
temp_v = (struct v_struct *)malloc(sizeof(struct v_struct)*(nnz+1));
/*------------------------
Read input matrix
------------------------*/
for(i=0;i<nnz;i++) {
fscanf(fp, "%d %d", &temp_v[i].row, &temp_v[i].col);
temp_v[i].row--; temp_v[i].col--;
if(temp_v[i].row < 0 || temp_v[i].row >= nr || temp_v[i].col < 0 || temp_v[i].col >= nc) {
fprintf(stdout, "A vertex id is out of range %d %d\n", temp_v[i].row, temp_v[i].col);
exit(0);
}
if(nflag == 0) temp_v[i].val = (FTYPE)(rand()%1048576)/1048576;
else if(nflag == 1) {
FTYPE ftemp;
fscanf(fp, " %f ", &ftemp);
temp_v[i].val = ftemp;
} else { // complex
FTYPE ftemp1, ftemp2;
fscanf(fp, " %f %f ", &ftemp1, &ftemp2);
temp_v[i].val = ftemp1;
}
if(sflag == 1) {
i++;
temp_v[i].row = temp_v[i-1].col;
temp_v[i].col = temp_v[i-1].row;
temp_v[i].val = temp_v[i-1].val;
}
}
qsort(temp_v, nnz, sizeof(struct v_struct), compare1);
/*------------------------
Estimate block nnz
------------------------*/
int p = nBlock;
int rootp = sqrt(nBlock);
int nRowsBlock = (nr + rootp - 1) / rootp;
int nColsBlock = (nc + rootp - 1) / rootp;
// sc = (sc + rootp -1)/rootp;
// From here on nr/nc are reused to mean the per-block dimensions.
nr = nRowsBlock;
nc = nColsBlock;
int *nnzBlock = (int*)malloc(nBlock * sizeof(int));
memset(nnzBlock, 0, nBlock * sizeof(int));
int bi, bj, bid;
for(i = 0; i < nnz; i++){
bi = temp_v[i].row/nRowsBlock;
bj = temp_v[i].col/nColsBlock;
bid = bi * rootp + bj;
nnzBlock[bid]++;
}
int tot_nnz = 0;
/*------------------------
Created blocked COO/CSR
------------------------*/
// NOTE(review): b_rowPtr[b] is sized nRowsBlock ints, but the fill loop
// below writes index 1+local_rowInd (up to nRowsBlock) and the device copy
// later reads nr+1 ints -- a one-element heap overflow; the allocation
// should presumably be (nRowsBlock+1)*sizeof(int).
int **b_rowPtr = (int **)malloc(nBlock * sizeof (int *) );
int **b_rowInd = (int **)malloc(nBlock * sizeof (int *) );
int **b_colInd = (int **)malloc(nBlock * sizeof (int *) );
FTYPE **b_val = (FTYPE **)malloc(nBlock * sizeof (FTYPE *) );
for (int b = 0; b < nBlock; b++) {
b_rowPtr[b] = (int *) malloc ((nRowsBlock) * sizeof (int)) ;
memset(&b_rowPtr[b][0], 0, nRowsBlock * sizeof(int));
b_rowInd[b] = (int *) malloc ((nnzBlock[b]) * sizeof (int)) ;
b_colInd[b] = (int *) malloc ((nnzBlock[b]) * sizeof (int)) ;
b_val[b] = (FTYPE *) malloc ((nnzBlock[b]) * sizeof (FTYPE )) ;
}
memset( nnzBlock, 0, (nBlock) * sizeof(int) );
for(i = 0; i < nnz; i++){
int br = temp_v[i].row/nRowsBlock;
int bc = temp_v[i].col/nColsBlock;
int bId = br * rootp + bc;
int local_rowInd = temp_v[i].row % nRowsBlock;
b_rowPtr[bId][1+local_rowInd] = nnzBlock[bId]+1;
b_rowInd[bId][nnzBlock[bId]] = temp_v[i].row % nRowsBlock;
b_colInd[bId][nnzBlock[bId]] = temp_v[i].col % nColsBlock;
b_val[bId][nnzBlock[bId]] = temp_v[i].val;
nnzBlock[bId]++;
}
// Forward-fill empty row-pointer slots so each block forms a valid CSR
// offset array. NOTE(review): when r == 0 and the slot is 0 this reads
// b_rowPtr[b][-1], i.e. out of bounds.
for (int b = 0; b < nBlock; ++b)
{
for(int r = 0; r < nRowsBlock; r++)
{
if(b_rowPtr[b][r] == 0)
b_rowPtr[b][r] = b_rowPtr[b][r-1];
}
}
/* 1.5D Blocked SpMM */
float tot_time = 0;
// loop through blocks of each benchmark
for (int br = 0; br < rootp; ++br) {//loop over row blocks of A
for (int bk = 0; bk < rootp; ++bk) {// loop over col blocks of A
int b = br * rootp + bk;
if(!nnzBlock[b]) continue;
// nnz/nr/nc now describe the current block only.
nnz = nnzBlock[b];
nr = nRowsBlock;
nc = nColsBlock;
int *ccsr_v, *ccsr_e; FTYPE *ccsr_ev;
cudaMalloc((void **) &ccsr_v, sizeof(int)*(nr+1));
cudaMalloc((void **) &ccsr_e, sizeof(int)*nnz);
cudaMalloc((void **) &ccsr_ev, sizeof(FTYPE)*nnz);
cudaMemcpy(ccsr_v, &(b_rowPtr[b][0]), sizeof(int)*(nr+1), cudaMemcpyHostToDevice);
cudaMemcpy(ccsr_e, &(b_colInd[b][0]), sizeof(int)*(nnz), cudaMemcpyHostToDevice);
cudaMemcpy(ccsr_ev, &(b_val[b][0]), sizeof(FTYPE)*(nnz), cudaMemcpyHostToDevice);
/* initialize cusparse library */
status= cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
return EXIT_FAILURE;
}
/* create and setup matrix descriptor */
status= cusparseCreateMatDescr(&descra);
if (status != CUSPARSE_STATUS_SUCCESS) {
return EXIT_FAILURE;
}
cusparseSetMatType(descra,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descra,CUSPARSE_INDEX_BASE_ZERO);
cudaError_t err = cudaSuccess;
// Dense RHS (nc x sc, row-major, all ones) and zeroed output (nr x sc).
FTYPE *y_in, *cy_in, *y_out, *cy_out;
y_in = (FTYPE *)malloc(sizeof(FTYPE)*nc*sc);
y_out = (FTYPE *)malloc(sizeof(FTYPE)*(nr)*sc);
for(int i=0;i<nc*sc;i++)
y_in[i] = ((FTYPE)1);//(rand()%1048576))/1048576;
err = cudaMalloc((void **) &cy_in, sizeof(FTYPE)*nc*sc);
if(err != cudaSuccess) {fprintf(stdout, "\n"); exit(0); }
err = cudaMalloc((void **) &cy_out, sizeof(FTYPE)*(nr)*sc);
if(err != cudaSuccess) {fprintf(stdout, "\n"); exit(0); }
cudaMemcpy(cy_in, y_in, sizeof(FTYPE)*nc*sc, cudaMemcpyHostToDevice);
cudaMemset((void *)cy_out, 0, sc*(nr)*sizeof(FTYPE));
float tot_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
const FTYPE alpha=1.0f, beta=0.0f;
/*new SpMM*/
cusparseSpMatDescr_t a_cusparse;
status = cusparseCreateCsr(&a_cusparse, nr, nc, nnz,
ccsr_v, ccsr_e, ccsr_ev,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
cusparseDnMatDescr_t b_cusparse;
status = cusparseCreateDnMat(&b_cusparse, nc, sc, sc,
cy_in, CUDA_R_32F, CUSPARSE_ORDER_ROW);
cusparseDnMatDescr_t c_cusparse;
status = cusparseCreateDnMat(&c_cusparse, nr, sc, sc,
cy_out, CUDA_R_32F, CUSPARSE_ORDER_ROW);
// Query and allocate the workspace cusparseSpMM needs for ALG2.
size_t bufferSize = 0;
status = cusparseSpMM_bufferSize(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha,
a_cusparse,
b_cusparse,
&beta,
c_cusparse,
CUDA_R_32F,
CUSPARSE_SPMM_CSR_ALG2,
&bufferSize);
if (status != CUSPARSE_STATUS_SUCCESS) return EXIT_FAILURE;
char* externalBuffer = NULL;
cudaMalloc(&externalBuffer, bufferSize);
cudaDeviceSynchronize();
// Time ITER SpMM calls with CUDA events.
cudaEventRecord(event1,0);
#define ITER (1)
for(int ik=0;ik<ITER;ik++) {
status = cusparseSpMM(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha,
a_cusparse,
b_cusparse,
&beta,
c_cusparse,
CUDA_R_32F,
CUSPARSE_SPMM_CSR_ALG2,
externalBuffer);
}
cudaEventRecord(event2,0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&tot_ms, event1, event2);
cudaDeviceSynchronize();
if (status != CUSPARSE_STATUS_SUCCESS) return EXIT_FAILURE;
cudaMemcpy(y_out, cy_out, sizeof(FTYPE)*(nr)*sc, cudaMemcpyDeviceToHost);
cudaFree(cy_out); cudaFree(cy_in); free(y_out); free(y_in);
cudaFree(externalBuffer);
cudaFree(ccsr_v), cudaFree(ccsr_e); cudaFree(ccsr_ev);
// free(csr_v), free(csr_colIdx); free(csr_vals);
tot_time += tot_ms;
fprintf(stdout, "Block: %d, nnz: %d, tot_ms: %f s\n", b, nnzBlock[b], tot_ms/ITER);
}
}
// NOTE(review): only rootp of the nBlock per-block arrays are freed here,
// and b_rowInd is never freed; the cusparse handle/descriptors also leak.
for (int b = 0; b < rootp; ++b){
free(b_rowPtr[b]); free(b_colInd[b]); free(b_val[b]);
}
// NOTE(review): nnz at this point holds the *last processed block's* count,
// so the GFLOPS figure is computed from that block, not the whole matrix.
fprintf(stdout, "1.5D Blocking, K=%d : nBlocks: %d, nnz: %d, tot_ms: %f ms, GFLOPS: %f \n", sc, nBlock, nnz, tot_time, (double)ITER*(double)nnz*2*sc/tot_time/1000000);
fclose(fpo);
}
|
1ac55be21a709886973878df6a592487cb088086.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <rocblas.h>
#define NUMGPU 3
void MatrixMultiply(double *d_A, double *d_B, double *d_C, int A_H, int A_W, int B_W, int i, hipblasHandle_t h);
__global__ void Dummy();
int main() {
// Stress/overlap test: queues 100 launches of the Dummy spin kernel on
// device 3, then 100000 rounds of a 100x100 Dgemm on each of devices
// 0..NUMGPU-1, and finally synchronizes devices 0..NUMGPU (inclusive,
// which covers device 3). No results are checked; prints "finished".
hipblasHandle_t *handle;
hipStream_t *stream;
double **d_A = (double **)malloc(NUMGPU * sizeof(double *));
double **d_B = (double **)malloc(NUMGPU * sizeof(double *));
double **d_C = (double **)malloc(NUMGPU * sizeof(double *));
handle = (hipblasHandle_t *)malloc(NUMGPU * sizeof(hipblasHandle_t));
// stream[] is allocated but never used -- creation is commented out below.
stream = (hipStream_t *)malloc(NUMGPU * sizeof(hipStream_t));
// NOTE(review): only 100 doubles (800 bytes) are allocated per matrix, but
// MatrixMultiply runs a 100x100 GEMM that touches 100*100 elements --
// out-of-bounds device accesses; 100*100*sizeof(double) was likely meant.
for(int i = 0; i < NUMGPU; i++)
{
hipSetDevice(i);
hipMalloc((void **) &d_A[i], 100 * sizeof(double));
hipMalloc((void **) &d_B[i], 100 * sizeof(double));
hipMalloc((void **) &d_C[i], 100 * sizeof(double));
hipblasCreate(&handle[i]);
// hipStreamCreate(&stream[i]);
// hipblasSetStream(handle[i], stream[i]);
}
hipSetDevice(3);
for(int i = 0; i < 100; i++)
hipLaunchKernelGGL(( Dummy), dim3(1), dim3(1), 0, 0, );
for(int j = 0; j < 100000; j++)
{
for(int i = 0; i < NUMGPU; i++)
{
hipSetDevice(i);
MatrixMultiply(d_A[i], d_B[i], d_C[i], 100, 100, 100, i, handle[i]);
}
}
// Sync devices 0..NUMGPU; index NUMGPU (== 3) is the Dummy device.
for(int i = 0; i < NUMGPU + 1; i++)
{
hipSetDevice(i);
hipDeviceSynchronize();
}
// No cleanup: device buffers and blas handles are intentionally leaked
// (process exit reclaims them).
printf("finished\n");
}
// Busy-work kernel: spins through a fixed count of increments whose result
// is discarded. Exists only to occupy the device; single-thread launch.
__global__ void Dummy()
{
int ticks = 0;
int remaining = 100000;
while (remaining-- > 0)
++ticks;
}
// C = A * B for row-major matrices (A: A_H x A_W, B: A_W x B_W) via one
// hipblasDgemm call with alpha=1, beta=0. The operands are passed swapped
// (B first) with leading dimensions B_W/A_W/B_W -- the usual way to obtain
// a row-major product from a column-major BLAS. The gemm is asynchronous;
// the caller must have selected the right device and synchronize later.
// Parameter i (GPU index) is unused. Return status is ignored.
void MatrixMultiply(double *d_A, double *d_B, double *d_C, int A_H, int A_W, int B_W, int i, hipblasHandle_t h)
{
const double alp = 1.0f;
const double bet = 0.0f;
hipblasDgemm(h, HIPBLAS_OP_N, HIPBLAS_OP_N, B_W, A_H, A_W, &alp, d_B, B_W, d_A, A_W, &bet, d_C, B_W);
}
| 1ac55be21a709886973878df6a592487cb088086.cu | #include <stdio.h>
#include <cublas_v2.h>
#define NUMGPU 3
void MatrixMultiply(double *d_A, double *d_B, double *d_C, int A_H, int A_W, int B_W, int i, cublasHandle_t h);
__global__ void Dummy();
int main() {
// CUDA twin of the HIP version above: 100 Dummy launches on device 3,
// 100000 rounds of a 100x100 Dgemm on devices 0..NUMGPU-1, then a sync of
// devices 0..NUMGPU (inclusive). Results are never checked.
cublasHandle_t *handle;
cudaStream_t *stream;
double **d_A = (double **)malloc(NUMGPU * sizeof(double *));
double **d_B = (double **)malloc(NUMGPU * sizeof(double *));
double **d_C = (double **)malloc(NUMGPU * sizeof(double *));
handle = (cublasHandle_t *)malloc(NUMGPU * sizeof(cublasHandle_t));
// stream[] is allocated but never used -- creation is commented out below.
stream = (cudaStream_t *)malloc(NUMGPU * sizeof(cudaStream_t));
// NOTE(review): only 100 doubles are allocated per matrix, but the GEMM in
// MatrixMultiply touches 100*100 elements -- out-of-bounds device accesses;
// 100*100*sizeof(double) was likely intended.
for(int i = 0; i < NUMGPU; i++)
{
cudaSetDevice(i);
cudaMalloc((void **) &d_A[i], 100 * sizeof(double));
cudaMalloc((void **) &d_B[i], 100 * sizeof(double));
cudaMalloc((void **) &d_C[i], 100 * sizeof(double));
cublasCreate(&handle[i]);
// cudaStreamCreate(&stream[i]);
// cublasSetStream(handle[i], stream[i]);
}
cudaSetDevice(3);
for(int i = 0; i < 100; i++)
Dummy<<<1, 1>>>();
for(int j = 0; j < 100000; j++)
{
for(int i = 0; i < NUMGPU; i++)
{
cudaSetDevice(i);
MatrixMultiply(d_A[i], d_B[i], d_C[i], 100, 100, 100, i, handle[i]);
}
}
// Sync devices 0..NUMGPU; index NUMGPU (== 3) is the Dummy device.
for(int i = 0; i < NUMGPU + 1; i++)
{
cudaSetDevice(i);
cudaDeviceSynchronize();
}
// No cleanup: device buffers and cublas handles are leaked deliberately.
printf("finished\n");
}
// Busy-work kernel: spins through a fixed count of increments whose result
// is discarded. Exists only to occupy the device; single-thread launch.
__global__ void Dummy()
{
int ticks = 0;
int remaining = 100000;
while (remaining-- > 0)
++ticks;
}
// C = A * B for row-major matrices (A: A_H x A_W, B: A_W x B_W) via one
// cublasDgemm call with alpha=1, beta=0. Operands are passed swapped (B
// first) with leading dimensions B_W/A_W/B_W -- the standard trick for a
// row-major product on a column-major BLAS. Asynchronous; caller selects
// the device and synchronizes. Parameter i (GPU index) is unused; the
// cublas status is ignored.
void MatrixMultiply(double *d_A, double *d_B, double *d_C, int A_H, int A_W, int B_W, int i, cublasHandle_t h)
{
const double alp = 1.0f;
const double bet = 0.0f;
cublasDgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, B_W, A_H, A_W, &alp, d_B, B_W, d_A, A_W, &bet, d_C, B_W);
}
|
f4c14a1da229e8329c64115fc888f79695909834.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copyBestPath.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Auto-generated launch-configuration sweep: for the first argv[1] matrix
// sizes and each of the 20 block shapes, warms up copyBestPath with 10
// launches and then times 1000 launches, printing [usecs,(bx,by),(x,y)].
// NOTE(review): argv[1] is dereferenced without an argc check.
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int i = 1;
// NOTE(review): allocates XSIZE*YSIZE *bytes* for an int buffer --
// presumably XSIZE*YSIZE*sizeof(int) was intended; also these buffers are
// never freed, so each sweep iteration leaks device memory.
int *bestPathResult = NULL;
hipMalloc(&bestPathResult, XSIZE*YSIZE);
int *pathResults = NULL;
hipMalloc(&pathResults, XSIZE*YSIZE);
// Round the problem size up to a multiple of the block dims so the grid
// divides evenly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation before timing begins.
hipFree(0);hipLaunchKernelGGL((
copyBestPath), dim3(gridBlock),dim3(threadBlock), 0, 0, i,bestPathResult,pathResults);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
copyBestPath), dim3(gridBlock),dim3(threadBlock), 0, 0, i,bestPathResult,pathResults);
}
// NOTE(review): the timed region measures launch *submission* of 1000
// kernels -- there is no device sync before reading the end timestamp.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
copyBestPath), dim3(gridBlock),dim3(threadBlock), 0, 0, i,bestPathResult,pathResults);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copyBestPath.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// CUDA twin of the HIP sweep above: for the first argv[1] matrix sizes and
// each of 20 block shapes, warms up copyBestPath with 10 launches and
// times 1000 launches, printing [usecs,(bx,by),(x,y)].
// NOTE(review): argv[1] is dereferenced without an argc check.
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int i = 1;
// NOTE(review): allocates XSIZE*YSIZE *bytes* for an int buffer --
// presumably XSIZE*YSIZE*sizeof(int) was intended; the buffers are never
// freed, so each iteration leaks device memory.
int *bestPathResult = NULL;
cudaMalloc(&bestPathResult, XSIZE*YSIZE);
int *pathResults = NULL;
cudaMalloc(&pathResults, XSIZE*YSIZE);
// Round the problem size up to a multiple of the block dims.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces lazy context creation before timing begins.
cudaFree(0);
copyBestPath<<<gridBlock,threadBlock>>>(i,bestPathResult,pathResults);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copyBestPath<<<gridBlock,threadBlock>>>(i,bestPathResult,pathResults);
}
// NOTE(review): the timed region measures launch *submission* of 1000
// kernels -- there is no device sync before the end timestamp.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copyBestPath<<<gridBlock,threadBlock>>>(i,bestPathResult,pathResults);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
90a6f4429b28c2d7ad7e8cb8d0d93338d6ffa57e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_right;
int xdim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_right;
int ydim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_right;
int xdim1_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_right;
int ydim1_update_halo_kernel5_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_right * (y) + \
xdim0_update_halo_kernel5_plus_4_right * \
ydim0_update_halo_kernel5_plus_4_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_right * (y) + \
xdim1_update_halo_kernel5_plus_4_right * \
ydim1_update_halo_kernel5_plus_4_right * (z))
// user function
// Halo update "+4 right": for each flux field whose flag is set in fields[],
// copy the value 4 grid points to the left (x-4, via the OPS_ACC macros)
// into the current point. Called per-point by the wrapper kernel below.
__device__
inline void
update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(-4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(-4, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
// Wrapper kernel: one thread per point of the (size0 x size1 x size2) range.
// Each thread offsets arg0/arg1 to its own element using the __constant__
// leading dimensions, then applies the user function under a bounds guard
// (the grid may overshoot the range).
__global__ void ops_update_halo_kernel5_plus_4_right(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right *
ydim0_update_halo_kernel5_plus_4_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right *
ydim1_update_halo_kernel5_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
// Auto-generated OPS host stub for kernel id 90. In the non-lazy build it is
// called directly with the loop arguments; in the lazy build the _execute
// variant unpacks them from a previously enqueued descriptor. It computes
// the locally owned sub-range, refreshes __constant__ leading dims, stages
// the fields[] flags into the device constants buffer, launches the wrapper
// kernel, and does timing/dirty-bit bookkeeping.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_right_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 90))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(90, "update_halo_kernel5_plus_4_right");
OPS_kernels[90].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
// Refresh the cached __constant__ leading dimensions only when changed.
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_right_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_right_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_plus_4_right_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_plus_4_right_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_plus_4_right_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
// Stage the NUM_FIELDS flag array into the device-side constants buffer.
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[90].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_right), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
// Launch errors are caught immediately; async execution errors only
// surface via the device sync below, which runs when diagnostics are on.
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[90].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[90].mpi_time += t2 - t1;
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
// Lazy-execution entry point: packs the loop arguments into an
// ops_kernel_descriptor (deep-copying the fields[] array, since the caller
// may reuse its buffer before execution) and enqueues it; the _execute
// function above runs later.
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 90;
// djb2-style hash over kernel id, range, and dat indices.
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 90;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(90, "update_halo_kernel5_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 90a6f4429b28c2d7ad7e8cb8d0d93338d6ffa57e.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_right;
int xdim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_right;
int ydim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_right;
int xdim1_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_right;
int ydim1_update_halo_kernel5_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_right * (y) + \
xdim0_update_halo_kernel5_plus_4_right * \
ydim0_update_halo_kernel5_plus_4_right * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_right * (y) + \
xdim1_update_halo_kernel5_plus_4_right * \
ydim1_update_halo_kernel5_plus_4_right * (z))
// user function
// Halo update "+4 right": for each flux field whose flag is set in fields[],
// copy the value 4 grid points to the left (x-4, via the OPS_ACC macros)
// into the current point. Called per-point by the wrapper kernel below.
__device__
inline void
update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(-4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(-4, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
// Wrapper kernel: one thread per point of the (size0 x size1 x size2) range.
// Each thread offsets arg0/arg1 to its own element using the __constant__
// leading dimensions, then applies the user function under a bounds guard
// (the grid may overshoot the range).
__global__ void ops_update_halo_kernel5_plus_4_right(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right *
ydim0_update_halo_kernel5_plus_4_right;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right *
ydim1_update_halo_kernel5_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
// Auto-generated OPS host stub for kernel id 90 (CUDA variant). Non-lazy
// build: called directly with the loop arguments; lazy build: the _execute
// variant unpacks them from a previously enqueued descriptor. It computes
// the locally owned sub-range, refreshes __constant__ leading dims, stages
// the fields[] flags into the device constants buffer, launches the wrapper
// kernel, and does timing/dirty-bit bookkeeping.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_right_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 90))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(90, "update_halo_kernel5_plus_4_right");
OPS_kernels[90].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
// Refresh the cached __constant__ leading dimensions only when changed.
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_right_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_right_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_right, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_plus_4_right_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_right, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_plus_4_right_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_right, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_plus_4_right_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_right, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
// Stage the NUM_FIELDS flag array into the device-side constants buffer.
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[90].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_plus_4_right<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
// Launch errors are caught immediately; async execution errors only
// surface via the device sync below, which runs when diagnostics are on.
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[90].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[90].mpi_time += t2 - t1;
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
// Lazy-execution entry point: packs the loop arguments into an
// ops_kernel_descriptor (deep-copying the fields[] array, since the caller
// may reuse its buffer before execution) and enqueues it; the _execute
// function above runs later.
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 90;
// djb2-style hash over kernel id, range, and dat indices.
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 90;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(90, "update_halo_kernel5_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
9b0368abf6965d0ca815d411e38dbe8fdbf51cfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDilatedMaxPooling.cu"
#else
#include "../common.h"
// Validates the pooling hyper-parameters and tensor shapes for
// SpatialDilatedMaxPooling.  Accepts 3D (C,H,W) or 4D (N,C,H,W) input; when
// gradOutput / indices are non-NULL they are checked against the computed
// output extents.  Raises a TH error on any inconsistency.
static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
                         THCState *state,
                         THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices,
                         int kH, int kW, int dH, int dW, int padH, int padW,
                         int dilationH, int dilationW, bool ceil_mode) {
  THArgCheck(kW > 0 && kH > 0, 5,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 8,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
  THArgCheck(dilationH > 0 && dilationW > 0, 12,
             "dilation should be greater than zero, but got dilationH: %d dilationW: %d",
             dilationH, dilationW);

  int ndim = input->nDimension;
  int dimf = 0;  // feature (channel) dimension
  int dimh = 1;  // height dimension
  int dimw = 2;  // width dimension
  int batchSize = 1;

  // Batched (4D) input: spatial dims shift right by one.
  if (ndim == 4) {
    batchSize = input->size[0];
    dimf++;
    dimh++;
    dimw++;
  }

  THCUNN_argCheck(state, ndim == 3 || ndim == 4, 2, input,
                  "3D or 4D input tensor expected but got: %s");

  THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
             "pad should be smaller than half of kernel size, but got "
             "padW = %d, padH = %d, kW = %d, kH = %d",
             padW, padH, kW, kH);

  int64_t nInputPlane = input->size[dimh-1];
  int64_t nInputRows = input->size[dimh];
  int64_t nInputCols = input->size[dimw];
  int64_t nOutputRows, nOutputCols;
  int64_t nOutputPlane = nInputPlane;

  // Standard dilated-pooling output-size formula; ceil_mode rounds up.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }

  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }

  if (nOutputCols < 1 || nOutputRows < 1)
    // NOTE(review): the sizes below are int64_t but formatted with %d —
    // confirm against THError's formatter (likely needs %ld on LP64).
    THError("Given input size: (%dx%dx%d). "
            "Calculated output size: (%dx%dx%d). Output size is too small",
            nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);

  if (gradOutput != NULL) {
    // Backward pass: gradOutput must match the forward output shape.
    THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
  }
  if (indices != NULL) {
    // Indices are checked as 4D (batch, plane, rows, cols).
    THCUNN_check_dim_size_indices(state, indices, 4, 0, batchSize);
    THCUNN_check_dim_size_indices(state, indices, 4, 1, nOutputPlane);
    THCUNN_check_dim_size_indices(state, indices, 4, 2, nOutputRows);
    THCUNN_check_dim_size_indices(state, indices, 4, 3, nOutputCols);
  }
}
// Forward pass of spatial dilated max pooling (HIP build).
// Resizes `output` to (N, C, outH, outW) — squeezed back to 3D for a 3D
// input — fills `indices` with per-output bookkeeping consumed by the
// backward pass, and launches one thread per output element.
void THNN_(SpatialDilatedMaxPooling_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           THCIndexTensor *indices,
           int kW, int kH,
           int dW, int dH,
           int padW, int padH,
           int dilationW, int dilationH,
           bool ceil_mode)
{
  THCUNN_assertSameGPU(state, 3, input, output, indices);
  THNN_(SpatialDilatedMaxPooling_shapeCheck)
       (state, input, NULL, NULL, kH, kW, dH, dW,
        padH, padW, dilationH, dilationW, ceil_mode);

  int64_t nInputCols, nInputRows, nInputPlane, batchSize;
  int64_t nOutputCols, nOutputRows;

  // A 3D input is treated as a batch of one.
  if (input->nDimension == 3) {
    nInputCols = input->size[2];
    nInputRows = input->size[1];
    nInputPlane = input->size[0];
    batchSize = 1;
  }
  else
  {
    nInputCols = input->size[3];
    nInputRows = input->size[2];
    nInputPlane = input->size[1];
    batchSize = input->size[0];
  }

  // Output extents — mirrors the computation in shapeCheck above.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }

  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }

  // newContiguous retains (or copies); released with free() before return.
  input = THCTensor_(newContiguous)(state, input);
  real* input_data = THCTensor_(data)(state, input);

  THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
  THCUNN_resizeAs_indices(state, indices, output);

  THCIndex_t* indices_data = THCIndexTensor_(data)(state, indices);
  real* output_data = THCTensor_(data)(state, output);

  // One thread per output element.
  int count = THCTensor_(nElement)(state, output);

  hipLaunchKernelGGL(( MaxPoolForward<real, accreal>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
      count, input_data,
      batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
      kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data);
  THCudaCheck(hipGetLastError());

  // Squeeze the batch dimension back off for 3D inputs.
  if(input->nDimension == 3)
    THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);

  THCTensor_(free)(state, input);
}
// Backward pass of spatial dilated max pooling (HIP build).
// Each *input* element gathers the gradient from the output positions whose
// recorded index matches it (one thread per input element, gather style).
void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           THCIndexTensor *indices,
           int kW, int kH,
           int dW, int dH,
           int padW, int padH,
           int dilationW, int dilationH,
           bool ceil_mode)
{
  THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput);
  THNN_(SpatialDilatedMaxPooling_shapeCheck)
       (state, input, gradOutput, indices, kH, kW, dH, dW,
        padH, padW, dilationH, dilationW, ceil_mode);

  // Work on contiguous tensors.  newContiguous retains (or copies), so each
  // of these references is released exactly once with free() before return.
  input = THCTensor_(newContiguous)(state, input);
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);

  int64_t nInputCols, nInputRows, nInputPlane, batchSize;
  int64_t nOutputCols, nOutputRows;

  // A 3D input is treated as a batch of one.
  if (input->nDimension == 3) {
    nInputCols = input->size[2];
    nInputRows = input->size[1];
    nInputPlane = input->size[0];
    batchSize = 1;
  }
  else
  {
    nInputCols = input->size[3];
    nInputRows = input->size[2];
    nInputPlane = input->size[1];
    batchSize = input->size[0];
  }

  // Output extents — mirrors the computation in shapeCheck.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }

  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }

  // Fix: the original called THCTensor_(newContiguous)(state, gradOutput) a
  // second time here and freed gradOutput twice at the end; the redundant
  // retain/copy pair has been removed.
  THCTensor_(resizeAs)(state, gradInput, input);

  // One thread per input element.
  int count = THCTensor_(nElement)(state, input);

  hipLaunchKernelGGL(( MaxPoolBackward<real, accreal>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
      count,
      THCTensor_(data)(state, gradOutput),
      THCIndexTensor_(data)(state, indices),
      batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
      kH, kW, dH, dW, padH, padW, dilationH, dilationW,
      THCTensor_(data)(state, gradInput));
  THCudaCheck(hipGetLastError());

  // Release the contiguous references taken above.
  THCTensor_(free)(state, input);
  THCTensor_(free)(state, gradOutput);
}
#endif
| 9b0368abf6965d0ca815d411e38dbe8fdbf51cfa.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDilatedMaxPooling.cu"
#else
#include "../common.h"
// Validates the pooling hyper-parameters and tensor shapes for
// SpatialDilatedMaxPooling.  Accepts 3D (C,H,W) or 4D (N,C,H,W) input; when
// gradOutput / indices are non-NULL they are checked against the computed
// output extents.  Raises a TH error on any inconsistency.
static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
                         THCState *state,
                         THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices,
                         int kH, int kW, int dH, int dW, int padH, int padW,
                         int dilationH, int dilationW, bool ceil_mode) {
  THArgCheck(kW > 0 && kH > 0, 5,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 8,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
  THArgCheck(dilationH > 0 && dilationW > 0, 12,
             "dilation should be greater than zero, but got dilationH: %d dilationW: %d",
             dilationH, dilationW);

  int ndim = input->nDimension;
  int dimf = 0;  // feature (channel) dimension
  int dimh = 1;  // height dimension
  int dimw = 2;  // width dimension
  int batchSize = 1;

  // Batched (4D) input: spatial dims shift right by one.
  if (ndim == 4) {
    batchSize = input->size[0];
    dimf++;
    dimh++;
    dimw++;
  }

  THCUNN_argCheck(state, ndim == 3 || ndim == 4, 2, input,
                  "3D or 4D input tensor expected but got: %s");

  THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
             "pad should be smaller than half of kernel size, but got "
             "padW = %d, padH = %d, kW = %d, kH = %d",
             padW, padH, kW, kH);

  int64_t nInputPlane = input->size[dimh-1];
  int64_t nInputRows = input->size[dimh];
  int64_t nInputCols = input->size[dimw];
  int64_t nOutputRows, nOutputCols;
  int64_t nOutputPlane = nInputPlane;

  // Standard dilated-pooling output-size formula; ceil_mode rounds up.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }

  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }

  if (nOutputCols < 1 || nOutputRows < 1)
    // NOTE(review): the sizes below are int64_t but formatted with %d —
    // confirm against THError's formatter (likely needs %ld on LP64).
    THError("Given input size: (%dx%dx%d). "
            "Calculated output size: (%dx%dx%d). Output size is too small",
            nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);

  if (gradOutput != NULL) {
    // Backward pass: gradOutput must match the forward output shape.
    THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
    THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
  }
  if (indices != NULL) {
    // Indices are checked as 4D (batch, plane, rows, cols).
    THCUNN_check_dim_size_indices(state, indices, 4, 0, batchSize);
    THCUNN_check_dim_size_indices(state, indices, 4, 1, nOutputPlane);
    THCUNN_check_dim_size_indices(state, indices, 4, 2, nOutputRows);
    THCUNN_check_dim_size_indices(state, indices, 4, 3, nOutputCols);
  }
}
// Forward pass of spatial dilated max pooling (CUDA build).
// Resizes `output` to (N, C, outH, outW) — squeezed back to 3D for a 3D
// input — fills `indices` with per-output bookkeeping consumed by the
// backward pass, and launches one thread per output element.
void THNN_(SpatialDilatedMaxPooling_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           THCIndexTensor *indices,
           int kW, int kH,
           int dW, int dH,
           int padW, int padH,
           int dilationW, int dilationH,
           bool ceil_mode)
{
  THCUNN_assertSameGPU(state, 3, input, output, indices);
  THNN_(SpatialDilatedMaxPooling_shapeCheck)
       (state, input, NULL, NULL, kH, kW, dH, dW,
        padH, padW, dilationH, dilationW, ceil_mode);

  int64_t nInputCols, nInputRows, nInputPlane, batchSize;
  int64_t nOutputCols, nOutputRows;

  // A 3D input is treated as a batch of one.
  if (input->nDimension == 3) {
    nInputCols = input->size[2];
    nInputRows = input->size[1];
    nInputPlane = input->size[0];
    batchSize = 1;
  }
  else
  {
    nInputCols = input->size[3];
    nInputRows = input->size[2];
    nInputPlane = input->size[1];
    batchSize = input->size[0];
  }

  // Output extents — mirrors the computation in shapeCheck above.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }

  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }

  // newContiguous retains (or copies); released with free() before return.
  input = THCTensor_(newContiguous)(state, input);
  real* input_data = THCTensor_(data)(state, input);

  THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
  THCUNN_resizeAs_indices(state, indices, output);

  THCIndex_t* indices_data = THCIndexTensor_(data)(state, indices);
  real* output_data = THCTensor_(data)(state, output);

  // One thread per output element.
  int count = THCTensor_(nElement)(state, output);

  MaxPoolForward<real, accreal> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
      (count, input_data,
      batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
      kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data);
  THCudaCheck(cudaGetLastError());

  // Squeeze the batch dimension back off for 3D inputs.
  if(input->nDimension == 3)
    THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);

  THCTensor_(free)(state, input);
}
// Backward pass of spatial dilated max pooling (CUDA build).
// Each *input* element gathers the gradient from the output positions whose
// recorded index matches it (one thread per input element, gather style).
void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           THCIndexTensor *indices,
           int kW, int kH,
           int dW, int dH,
           int padW, int padH,
           int dilationW, int dilationH,
           bool ceil_mode)
{
  THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput);
  THNN_(SpatialDilatedMaxPooling_shapeCheck)
       (state, input, gradOutput, indices, kH, kW, dH, dW,
        padH, padW, dilationH, dilationW, ceil_mode);

  // Work on contiguous tensors.  newContiguous retains (or copies), so each
  // of these references is released exactly once with free() before return.
  input = THCTensor_(newContiguous)(state, input);
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);

  int64_t nInputCols, nInputRows, nInputPlane, batchSize;
  int64_t nOutputCols, nOutputRows;

  // A 3D input is treated as a batch of one.
  if (input->nDimension == 3) {
    nInputCols = input->size[2];
    nInputRows = input->size[1];
    nInputPlane = input->size[0];
    batchSize = 1;
  }
  else
  {
    nInputCols = input->size[3];
    nInputRows = input->size[2];
    nInputPlane = input->size[1];
    batchSize = input->size[0];
  }

  // Output extents — mirrors the computation in shapeCheck.
  if(ceil_mode) {
    nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }
  else {
    nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
    nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
  }

  if (padW || padH)
  {
    // ensure that the last pooling starts inside the image
    // needed to avoid problems in ceil mode
    if ((nOutputRows - 1)*dH >= nInputRows + padH)
      --nOutputRows;
    if ((nOutputCols - 1)*dW >= nInputCols + padW)
      --nOutputCols;
  }

  // Fix: the original called THCTensor_(newContiguous)(state, gradOutput) a
  // second time here and freed gradOutput twice at the end; the redundant
  // retain/copy pair has been removed.
  THCTensor_(resizeAs)(state, gradInput, input);

  // One thread per input element.
  int count = THCTensor_(nElement)(state, input);

  MaxPoolBackward<real, accreal> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
      (count,
      THCTensor_(data)(state, gradOutput),
      THCIndexTensor_(data)(state, indices),
      batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
      kH, kW, dH, dW, padH, padW, dilationH, dilationW,
      THCTensor_(data)(state, gradInput));
  THCudaCheck(cudaGetLastError());

  // Release the contiguous references taken above.
  THCTensor_(free)(state, input);
  THCTensor_(free)(state, gradOutput);
}
#endif
|
0f7e414c0202c142b6facdcd79e7d537a4c5d8cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/////////////////////////////////////////////////////////////////////////////
/// Copyright 2020 Google LLC
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// https://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
/////////////////////////////////////////////////////////////////////////////
/// Modifications: pedro hermosilla (pedro-1.hermosilla-casajus@uni-ulm.de)
/////////////////////////////////////////////////////////////////////////////
#include "defines.hpp"
#include "math_helper.cuh"
#include "cuda_kernel_utils.cuh"
#include "grid_utils.cuh"
#include "build_grid_ds.cuh"
///////////////////////// GPU
/**
* GPU kernel to compute the grid data structure.
* @param pNumPts Number of points.
* @param pKeys Array of keys.
* @param pNumCells Number of cells.
* @param pOutDS Output array with the data structure.
* @paramT D Number of dimensions.
*/
// Builds the per-cell index ranges from the key-sorted point array.
// pOutDS holds one int2 per cell: [x, y) = first / one-past-last point index
// in that cell.  The array is zero-initialized by the host launcher, so the
// cell of point 0 needs no explicit start write and empty cells read {0, 0}.
template<int D>
__global__ void build_grid_gpu_kernel(
    const unsigned int pNumPts,
    const mccnn::int64_m* __restrict__ pKeys,
    const mccnn::ipoint<D>* __restrict__ pNumCells,
    int2* __restrict__ pOutDS)
{
    int initPtIndex = mccnn::compute_global_index_gpu_funct();
    int totalThreads = mccnn::compute_total_threads_gpu_funct();

    // Grid-stride loop over the key-sorted points.
    for(int curPtIndex = initPtIndex; curPtIndex < pNumPts; curPtIndex += totalThreads)
    {
        //Get the key and compute the index into the ds.
        mccnn::int64_m curKey = pKeys[curPtIndex];
        int dsIndex = mccnn::compute_ds_index_from_key_gpu_funct(curKey, pNumCells[0]);

        //Check if it is the first point in the ds cell.
        // Fix: the original tested `prevPtIndex > 0`, which skipped
        // curPtIndex == 1 and left a cell that starts at point index 1
        // without its range start (fusing it with the cell of point 0);
        // `>= 0` handles that boundary correctly.
        int prevPtIndex = curPtIndex-1;
        if(prevPtIndex >= 0){
            if(dsIndex !=
                mccnn::compute_ds_index_from_key_gpu_funct(pKeys[prevPtIndex], pNumCells[0])){
                pOutDS[dsIndex].x = curPtIndex;
            }
        }

        //Check if it is the last point in the ds cell.
        int nextPtIndex = curPtIndex+1;
        if(nextPtIndex == pNumPts){
            pOutDS[dsIndex].y = pNumPts;
        }else if(dsIndex !=
            mccnn::compute_ds_index_from_key_gpu_funct(pKeys[nextPtIndex], pNumCells[0])){
            pOutDS[dsIndex].y = nextPtIndex;
        }
    }
}
///////////////////////// CPU
// Host-side launcher (HIP build) for build_grid_gpu_kernel.
// pOutGPUPtrDS is interpreted as one int2 per grid cell (start / end index
// into the key-sorted point array) and is zero-initialized first, so empty
// cells read {0, 0}.  Assumes pInGPUPtrKeys is sorted so every cell occupies
// one contiguous run of points — TODO confirm with the key-sorting stage.
template<int D>
void mccnn::build_grid_ds_gpu(
    std::unique_ptr<IGPUDevice>& pDevice,
    const unsigned int pDSSize,
    const unsigned int pNumPts,
    const mccnn::int64_m* pInGPUPtrKeys,
    const int* pInGPUPtrNumCells,
    int* pOutGPUPtrDS)
{
    //Get the cuda stream.
    auto cudaStream = pDevice->getCUDAStream();

#ifdef DEBUG_INFO
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, cudaStream);
#endif

    //Get the device properties.
    const GpuDeviceProperties& gpuProps = pDevice->get_device_properties();

    //Calculate the ideal number of blocks for the selected block size.
    unsigned int numMP = gpuProps.numMPs_;
    unsigned int blockSize = gpuProps.warpSize_*2;
    unsigned int numBlocks = pDevice->get_max_active_block_x_sm(
        blockSize,(const void*)build_grid_gpu_kernel<D>, 0);
    pDevice->check_error(__FILE__, __LINE__);

    //Calculate the total number of blocks to execute.
    unsigned int execBlocks = pNumPts/blockSize;
    execBlocks += (pNumPts%blockSize != 0)?1:0;  // ceil-divide
    unsigned int totalNumBlocks = numMP*numBlocks;
    // Cap at the number of blocks actually needed for pNumPts.
    totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks;

    //Initialize to zero the output array.
    pDevice->memset(pOutGPUPtrDS, 0, sizeof(int)*pDSSize);
    pDevice->check_error(__FILE__, __LINE__);

    //Execute the cuda kernel.
    hipLaunchKernelGGL(( build_grid_gpu_kernel<D>), dim3(totalNumBlocks), dim3(blockSize), 0, cudaStream,
        pNumPts,
        pInGPUPtrKeys,
        (const mccnn::ipoint<D>*)pInGPUPtrNumCells,
        (int2*)pOutGPUPtrDS);
    pDevice->check_error(__FILE__, __LINE__);

#ifdef DEBUG_INFO
    hipEventRecord(stop, cudaStream);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);

    // Copy the result back and compute min/max points per cell range for the
    // diagnostic report printed below.
    int numCells[D];
    pDevice->memcpy_device_to_host((void*)&numCells, (void*)pInGPUPtrNumCells, sizeof(int)*D);
    int* dsCPU = new int[pDSSize];
    pDevice->memcpy_device_to_host((void*)dsCPU, (void*)pOutGPUPtrDS, sizeof(int)*pDSSize);

    int maxNumPts = 0;
    int minNumPts = pNumPts;
    for(int i=0; i < pDSSize; i+=2)
    {
        int curNumPts = dsCPU[i+1]-dsCPU[i];
        if(curNumPts < minNumPts){
            minNumPts = curNumPts;
        }else if(curNumPts > maxNumPts){
            maxNumPts = curNumPts;
        }
    }

    int batchSize = pDSSize/(2*numCells[0]*numCells[1]);
    float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_;

    fprintf(stderr, "### BUILD GRID ###\n");
    fprintf(stderr, "Num points: %d\n", pNumPts);
    fprintf(stderr, "Batch size: %d\n", batchSize);
    fprintf(stderr, "Grid size: ");
    for(int i = 0; i < D; ++i)
        fprintf(stderr, "%d ", numCells[i]);
    fprintf(stderr, "\n");
    fprintf(stderr, "Min num points x tube: %d\n", minNumPts);
    fprintf(stderr, "Max num points x tube: %d\n", maxNumPts);
    fprintf(stderr, "Occupancy: %f\n", gpuOccupancy);
    fprintf(stderr, "Execution time: %f\n", milliseconds);
    fprintf(stderr, "\n");

    delete[] dsCPU;
#endif
}
///////////////////////// CPU Template declaration
// Explicit template instantiations of build_grid_ds_gpu: the project macro
// DECLARE_TEMPLATE_DIMS expands BUILD_GRID_DS_TEMP_DECL once per supported
// dimension count.
#define BUILD_GRID_DS_TEMP_DECL(Dims)                \
    template void mccnn::build_grid_ds_gpu<Dims>(    \
        std::unique_ptr<IGPUDevice>& pDevice,        \
        const unsigned int pDSSize,                  \
        const unsigned int pNumPts,                  \
        const mccnn::int64_m* pInGPUPtrKeys,         \
        const int* pInGPUPtrNumCells,                \
        int* pOutGPUPtrDS);
DECLARE_TEMPLATE_DIMS(BUILD_GRID_DS_TEMP_DECL) | 0f7e414c0202c142b6facdcd79e7d537a4c5d8cd.cu | /////////////////////////////////////////////////////////////////////////////
/// Copyright 2020 Google LLC
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// https://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
/////////////////////////////////////////////////////////////////////////////
/// Modifications: pedro hermosilla (pedro-1.hermosilla-casajus@uni-ulm.de)
/////////////////////////////////////////////////////////////////////////////
#include "defines.hpp"
#include "math_helper.cuh"
#include "cuda_kernel_utils.cuh"
#include "grid_utils.cuh"
#include "build_grid_ds.cuh"
///////////////////////// GPU
/**
* GPU kernel to compute the grid data structure.
* @param pNumPts Number of points.
* @param pKeys Array of keys.
* @param pNumCells Number of cells.
* @param pOutDS Output array with the data structure.
* @paramT D Number of dimensions.
*/
// Builds the per-cell index ranges from the key-sorted point array.
// pOutDS holds one int2 per cell: [x, y) = first / one-past-last point index
// in that cell.  The array is zero-initialized by the host launcher, so the
// cell of point 0 needs no explicit start write and empty cells read {0, 0}.
template<int D>
__global__ void build_grid_gpu_kernel(
    const unsigned int pNumPts,
    const mccnn::int64_m* __restrict__ pKeys,
    const mccnn::ipoint<D>* __restrict__ pNumCells,
    int2* __restrict__ pOutDS)
{
    int initPtIndex = mccnn::compute_global_index_gpu_funct();
    int totalThreads = mccnn::compute_total_threads_gpu_funct();

    // Grid-stride loop over the key-sorted points.
    for(int curPtIndex = initPtIndex; curPtIndex < pNumPts; curPtIndex += totalThreads)
    {
        //Get the key and compute the index into the ds.
        mccnn::int64_m curKey = pKeys[curPtIndex];
        int dsIndex = mccnn::compute_ds_index_from_key_gpu_funct(curKey, pNumCells[0]);

        //Check if it is the first point in the ds cell.
        // Fix: the original tested `prevPtIndex > 0`, which skipped
        // curPtIndex == 1 and left a cell that starts at point index 1
        // without its range start (fusing it with the cell of point 0);
        // `>= 0` handles that boundary correctly.
        int prevPtIndex = curPtIndex-1;
        if(prevPtIndex >= 0){
            if(dsIndex !=
                mccnn::compute_ds_index_from_key_gpu_funct(pKeys[prevPtIndex], pNumCells[0])){
                pOutDS[dsIndex].x = curPtIndex;
            }
        }

        //Check if it is the last point in the ds cell.
        int nextPtIndex = curPtIndex+1;
        if(nextPtIndex == pNumPts){
            pOutDS[dsIndex].y = pNumPts;
        }else if(dsIndex !=
            mccnn::compute_ds_index_from_key_gpu_funct(pKeys[nextPtIndex], pNumCells[0])){
            pOutDS[dsIndex].y = nextPtIndex;
        }
    }
}
///////////////////////// CPU
// Host-side launcher (CUDA build) for build_grid_gpu_kernel.
// pOutGPUPtrDS is interpreted as one int2 per grid cell (start / end index
// into the key-sorted point array) and is zero-initialized first, so empty
// cells read {0, 0}.  Assumes pInGPUPtrKeys is sorted so every cell occupies
// one contiguous run of points — TODO confirm with the key-sorting stage.
template<int D>
void mccnn::build_grid_ds_gpu(
    std::unique_ptr<IGPUDevice>& pDevice,
    const unsigned int pDSSize,
    const unsigned int pNumPts,
    const mccnn::int64_m* pInGPUPtrKeys,
    const int* pInGPUPtrNumCells,
    int* pOutGPUPtrDS)
{
    //Get the cuda stream.
    auto cudaStream = pDevice->getCUDAStream();

#ifdef DEBUG_INFO
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, cudaStream);
#endif

    //Get the device properties.
    const GpuDeviceProperties& gpuProps = pDevice->get_device_properties();

    //Calculate the ideal number of blocks for the selected block size.
    unsigned int numMP = gpuProps.numMPs_;
    unsigned int blockSize = gpuProps.warpSize_*2;
    unsigned int numBlocks = pDevice->get_max_active_block_x_sm(
        blockSize,(const void*)build_grid_gpu_kernel<D>, 0);
    pDevice->check_error(__FILE__, __LINE__);

    //Calculate the total number of blocks to execute.
    unsigned int execBlocks = pNumPts/blockSize;
    execBlocks += (pNumPts%blockSize != 0)?1:0;  // ceil-divide
    unsigned int totalNumBlocks = numMP*numBlocks;
    // Cap at the number of blocks actually needed for pNumPts.
    totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks;

    //Initialize to zero the output array.
    pDevice->memset(pOutGPUPtrDS, 0, sizeof(int)*pDSSize);
    pDevice->check_error(__FILE__, __LINE__);

    //Execute the cuda kernel.
    build_grid_gpu_kernel<D><<<totalNumBlocks, blockSize, 0, cudaStream>>>(
        pNumPts,
        pInGPUPtrKeys,
        (const mccnn::ipoint<D>*)pInGPUPtrNumCells,
        (int2*)pOutGPUPtrDS);
    pDevice->check_error(__FILE__, __LINE__);

#ifdef DEBUG_INFO
    cudaEventRecord(stop, cudaStream);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    // Copy the result back and compute min/max points per cell range for the
    // diagnostic report printed below.
    int numCells[D];
    pDevice->memcpy_device_to_host((void*)&numCells, (void*)pInGPUPtrNumCells, sizeof(int)*D);
    int* dsCPU = new int[pDSSize];
    pDevice->memcpy_device_to_host((void*)dsCPU, (void*)pOutGPUPtrDS, sizeof(int)*pDSSize);

    int maxNumPts = 0;
    int minNumPts = pNumPts;
    for(int i=0; i < pDSSize; i+=2)
    {
        int curNumPts = dsCPU[i+1]-dsCPU[i];
        if(curNumPts < minNumPts){
            minNumPts = curNumPts;
        }else if(curNumPts > maxNumPts){
            maxNumPts = curNumPts;
        }
    }

    int batchSize = pDSSize/(2*numCells[0]*numCells[1]);
    float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_;

    fprintf(stderr, "### BUILD GRID ###\n");
    fprintf(stderr, "Num points: %d\n", pNumPts);
    fprintf(stderr, "Batch size: %d\n", batchSize);
    fprintf(stderr, "Grid size: ");
    for(int i = 0; i < D; ++i)
        fprintf(stderr, "%d ", numCells[i]);
    fprintf(stderr, "\n");
    fprintf(stderr, "Min num points x tube: %d\n", minNumPts);
    fprintf(stderr, "Max num points x tube: %d\n", maxNumPts);
    fprintf(stderr, "Occupancy: %f\n", gpuOccupancy);
    fprintf(stderr, "Execution time: %f\n", milliseconds);
    fprintf(stderr, "\n");

    delete[] dsCPU;
#endif
}
///////////////////////// CPU Template declaration
// Explicit template instantiations of build_grid_ds_gpu: the project macro
// DECLARE_TEMPLATE_DIMS expands BUILD_GRID_DS_TEMP_DECL once per supported
// dimension count.
#define BUILD_GRID_DS_TEMP_DECL(Dims)                \
    template void mccnn::build_grid_ds_gpu<Dims>(    \
        std::unique_ptr<IGPUDevice>& pDevice,        \
        const unsigned int pDSSize,                  \
        const unsigned int pNumPts,                  \
        const mccnn::int64_m* pInGPUPtrKeys,         \
        const int* pInGPUPtrNumCells,                \
        int* pOutGPUPtrDS);
DECLARE_TEMPLATE_DIMS(BUILD_GRID_DS_TEMP_DECL) |
64dbe82a10f8f7400ef07be20be515c525d14270.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error.hpp>
#include "graph.hpp"
#include "utilities/graph_utils.cuh"
namespace cugraph {
namespace detail {
// Volume of neighboors (*weight_s)
// Per-vertex neighborhood volume into `work`: the sum of neighbor weights
// v[.] when `weighted`, otherwise simply the vertex degree.
// Launch layout: the y dimension strides over rows; threadIdx.x cooperates
// in parallel_prefix_sum for the weighted case, where only lane x==0 stores.
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_row_sum(
  vertex_t n, edge_t const *csrPtr, vertex_t const *csrInd, weight_t const *v, weight_t *work)
{
  vertex_t row;
  edge_t start, end, length;
  weight_t sum;
  for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) {
    start = csrPtr[row];
    end = csrPtr[row + 1];
    length = end - start;

    // compute row sums
    if (weighted) {
      sum = parallel_prefix_sum(length, csrInd + start, v);
      if (threadIdx.x == 0) work[row] = sum;
    } else {
      // Unweighted: every x-lane writes the same degree value.
      work[row] = static_cast<weight_t>(length);
    }
  }
}
// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
// For every CSR edge j = (row, col): accumulates the neighborhood
// intersection volume into weight_i[j] (via atomicAdd) and stores the
// combined volume weight_s[j] = work[row] + work[col].
// Launch layout: z strides over rows, y over a row's edges, x over the
// entries of the smaller ("reference") adjacency list.
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_is(vertex_t n,
                           edge_t const *csrPtr,
                           vertex_t const *csrInd,
                           weight_t const *v,
                           weight_t *work,
                           weight_t *weight_i,
                           weight_t *weight_s)
{
  edge_t i, j, Ni, Nj;
  vertex_t row, col;
  vertex_t ref, cur, ref_col, cur_col, match;
  weight_t ref_val;

  for (row = threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) {
    for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1];
         j += gridDim.y * blockDim.y) {
      col = csrInd[j];

      // find which row has least elements (and call it reference row)
      Ni = csrPtr[row + 1] - csrPtr[row];
      Nj = csrPtr[col + 1] - csrPtr[col];
      ref = (Ni < Nj) ? row : col;
      cur = (Ni < Nj) ? col : row;

      // compute new sum weights
      weight_s[j] = work[row] + work[col];

      // compute new intersection weights
      // search for the element with the same column index in the reference row
      for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
           i += gridDim.x * blockDim.x) {
        match = -1;
        ref_col = csrInd[i];
        if (weighted) {
          ref_val = v[ref_col];
        } else {
          ref_val = 1.0;
        }

        // binary search (column indices are sorted within each row)
        edge_t left = csrPtr[cur];
        edge_t right = csrPtr[cur + 1] - 1;
        while (left <= right) {
          edge_t middle = (left + right) >> 1;
          cur_col = csrInd[middle];
          if (cur_col > ref_col) {
            right = middle - 1;
          } else if (cur_col < ref_col) {
            left = middle + 1;
          } else {
            match = middle;
            break;
          }
        }

        // if the element with the same column index in the reference row has been found
        if (match != -1) { atomicAdd(&weight_i[j], ref_val); }
      }
    }
  }
}
// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
// Using list of node pairs
// Same computation as jaccard_is, but restricted to an explicit list of
// (first_pair[idx], second_pair[idx]) vertex pairs instead of every CSR
// edge: intersection volume into weight_i[idx] (via atomicAdd) and combined
// volume weight_s[idx] = work[row] + work[col].
// Launch layout: z strides over pairs, x over the smaller adjacency list.
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_is_pairs(edge_t num_pairs,
                                 edge_t const *csrPtr,
                                 vertex_t const *csrInd,
                                 vertex_t const *first_pair,
                                 vertex_t const *second_pair,
                                 weight_t const *v,
                                 weight_t *work,
                                 weight_t *weight_i,
                                 weight_t *weight_s)
{
  edge_t i, idx, Ni, Nj, match;
  vertex_t row, col, ref, cur, ref_col, cur_col;
  weight_t ref_val;

  for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs;
       idx += gridDim.z * blockDim.z) {
    row = first_pair[idx];
    col = second_pair[idx];

    // find which row has least elements (and call it reference row)
    Ni = csrPtr[row + 1] - csrPtr[row];
    Nj = csrPtr[col + 1] - csrPtr[col];
    ref = (Ni < Nj) ? row : col;
    cur = (Ni < Nj) ? col : row;

    // compute new sum weights
    weight_s[idx] = work[row] + work[col];

    // compute new intersection weights
    // search for the element with the same column index in the reference row
    for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
         i += gridDim.x * blockDim.x) {
      match = -1;
      ref_col = csrInd[i];
      if (weighted) {
        ref_val = v[ref_col];
      } else {
        ref_val = 1.0;
      }

      // binary search (column indices are sorted within each row)
      edge_t left = csrPtr[cur];
      edge_t right = csrPtr[cur + 1] - 1;
      while (left <= right) {
        edge_t middle = (left + right) >> 1;
        cur_col = csrInd[middle];
        if (cur_col > ref_col) {
          right = middle - 1;
        } else if (cur_col < ref_col) {
          left = middle + 1;
        } else {
          match = middle;
          break;
        }
      }

      // if the element with the same column index in the reference row has been found
      if (match != -1) { atomicAdd(&weight_i[idx], ref_val); }
    }
  }
}
// Jaccard weights (*weight)
// Final per-edge Jaccard coefficient: |A ∩ B| / |A ∪ B|, where the union
// volume is recovered as (|A| + |B|) - |A ∩ B| from the precomputed
// intersection (weight_i) and sum (weight_s) arrays.
// One-dimensional grid-stride loop over the e edges.
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_jw(edge_t e,
                           weight_t const *weight_i,
                           weight_t const *weight_s,
                           weight_t *weight_j)
{
  edge_t stride = gridDim.x * blockDim.x;
  for (edge_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx < e; idx += stride) {
    weight_t intersect_vol = weight_i[idx];
    weight_t union_vol = weight_s[idx] - intersect_vol;
    weight_j[idx] = intersect_vol / union_vol;
  }
}
// Host driver for the all-edges Jaccard computation.  Runs three kernels:
//  1) jaccard_row_sum — per-vertex neighborhood volume into `work`,
//  2) jaccard_is      — per-edge intersection (weight_i) and sum (weight_s),
//  3) jaccard_jw      — final per-edge coefficient into weight_j.
// Always returns 0.
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard(vertex_t n,
            edge_t e,
            edge_t const *csrPtr,
            vertex_t const *csrInd,
            weight_t const *weight_in,
            weight_t *work,
            weight_t *weight_i,
            weight_t *weight_s,
            weight_t *weight_j)
{
  dim3 nthreads, nblocks;
  int y = 4;

  // setup launch configuration
  nthreads.x = 32;  // x-lanes cooperate in the row prefix sum
  nthreads.y = y;
  nthreads.z = 1;
  nblocks.x = 1;
  nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
  nblocks.z = 1;
  // launch kernel
  hipLaunchKernelGGL(( jaccard_row_sum<weighted, vertex_t, edge_t, weight_t>)
      , dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work);
  // NOTE(review): unchecked synchronize; on the default stream the following
  // launches are already ordered after row_sum, so this may be conservative.
  hipDeviceSynchronize();
  // weight_i is accumulated with atomics below, so it must start at zero.
  fill(e, weight_i, weight_t{0.0});

  // setup launch configuration
  nthreads.x = 32 / y;
  nthreads.y = y;
  nthreads.z = 8;
  nblocks.x = 1;
  nblocks.y = 1;
  nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS});  // 1;
  // launch kernel
  hipLaunchKernelGGL(( jaccard_is<weighted, vertex_t, edge_t, weight_t>)
      , dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work, weight_i, weight_s);

  // setup launch configuration
  nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS});
  nthreads.y = 1;
  nthreads.z = 1;
  nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
  nblocks.y = 1;
  nblocks.z = 1;
  // launch kernel
  hipLaunchKernelGGL(( jaccard_jw<weighted, vertex_t, edge_t, weight_t>)
      , dim3(nblocks), dim3(nthreads), 0, 0, e, weight_i, weight_s, weight_j);

  return 0;
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard_pairs(vertex_t n,
edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
hipLaunchKernelGGL(( jaccard_row_sum<weighted, vertex_t, edge_t, weight_t>)
, dim3(nblocks), dim3(nthreads), 0, 0, n, csrPtr, csrInd, weight_in, work);
hipDeviceSynchronize();
// NOTE: initilized weight_i vector with 0.0
// fill(num_pairs, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
hipLaunchKernelGGL(( jaccard_is_pairs<weighted, vertex_t, edge_t, weight_t>), dim3(nblocks), dim3(nthreads), 0, 0,
num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (edge_t)CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
hipLaunchKernelGGL(( jaccard_jw<weighted, vertex_t, edge_t, weight_t>)
, dim3(nblocks), dim3(nthreads), 0, 0, num_pairs, weight_i, weight_s, weight_j);
return 0;
}
} // namespace detail
template <typename VT, typename ET, typename WT>
void jaccard(GraphCSRView<VT, ET, WT> const &graph, WT const *weights, WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL");
rmm::device_vector<WT> weight_i(graph.number_of_edges);
rmm::device_vector<WT> weight_s(graph.number_of_edges);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::jaccard<false, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::jaccard<true, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template <typename VT, typename ET, typename WT>
void jaccard_list(GraphCSRView<VT, ET, WT> const &graph,
WT const *weights,
ET num_pairs,
VT const *first,
VT const *second,
WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL");
CUGRAPH_EXPECTS(first != nullptr, "Invalid input argument: first is NULL");
CUGRAPH_EXPECTS(second != nullptr, "Invalid input argument: second in NULL");
rmm::device_vector<WT> weight_i(num_pairs, WT{0.0});
rmm::device_vector<WT> weight_s(num_pairs);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::jaccard_pairs<false, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::jaccard_pairs<true, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template void jaccard<int32_t, int32_t, float>(GraphCSRView<int32_t, int32_t, float> const &,
float const *,
float *);
template void jaccard<int32_t, int32_t, double>(GraphCSRView<int32_t, int32_t, double> const &,
double const *,
double *);
template void jaccard<int64_t, int64_t, float>(GraphCSRView<int64_t, int64_t, float> const &,
float const *,
float *);
template void jaccard<int64_t, int64_t, double>(GraphCSRView<int64_t, int64_t, double> const &,
double const *,
double *);
template void jaccard_list<int32_t, int32_t, float>(GraphCSRView<int32_t, int32_t, float> const &,
float const *,
int32_t,
int32_t const *,
int32_t const *,
float *);
template void jaccard_list<int32_t, int32_t, double>(GraphCSRView<int32_t, int32_t, double> const &,
double const *,
int32_t,
int32_t const *,
int32_t const *,
double *);
template void jaccard_list<int64_t, int64_t, float>(GraphCSRView<int64_t, int64_t, float> const &,
float const *,
int64_t,
int64_t const *,
int64_t const *,
float *);
template void jaccard_list<int64_t, int64_t, double>(GraphCSRView<int64_t, int64_t, double> const &,
double const *,
int64_t,
int64_t const *,
int64_t const *,
double *);
} // namespace cugraph
| 64dbe82a10f8f7400ef07be20be515c525d14270.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/error.hpp>
#include "graph.hpp"
#include "utilities/graph_utils.cuh"
namespace cugraph {
namespace detail {
// Volume of neighboors (*weight_s)
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_row_sum(
vertex_t n, edge_t const *csrPtr, vertex_t const *csrInd, weight_t const *v, weight_t *work)
{
vertex_t row;
edge_t start, end, length;
weight_t sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
// compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0) work[row] = sum;
} else {
work[row] = static_cast<weight_t>(length);
}
}
}
// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_is(vertex_t n,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s)
{
edge_t i, j, Ni, Nj;
vertex_t row, col;
vertex_t ref, cur, ref_col, cur_col, match;
weight_t ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
// find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[j] = work[row] + work[col];
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[j], ref_val); }
}
}
}
}
// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
// Using list of node pairs
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_is_pairs(edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *v,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s)
{
edge_t i, idx, Ni, Nj, match;
vertex_t row, col, ref, cur, ref_col, cur_col;
weight_t ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
// find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
// compute new sum weights
weight_s[idx] = work[row] + work[col];
// compute new intersection weights
// search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
} else {
ref_val = 1.0;
}
// binary search (column indices are sorted within each row)
edge_t left = csrPtr[cur];
edge_t right = csrPtr[cur + 1] - 1;
while (left <= right) {
edge_t middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
} else if (cur_col < ref_col) {
left = middle + 1;
} else {
match = middle;
break;
}
}
// if the element with the same column index in the reference row has been found
if (match != -1) { atomicAdd(&weight_i[idx], ref_val); }
}
}
}
// Jaccard weights (*weight)
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
__global__ void jaccard_jw(edge_t e,
weight_t const *weight_i,
weight_t const *weight_s,
weight_t *weight_j)
{
edge_t j;
weight_t Wi, Ws, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x; j < e; j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Ws = weight_s[j];
Wu = Ws - Wi;
weight_j[j] = (Wi / Wu);
}
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard(vertex_t n,
edge_t e,
edge_t const *csrPtr,
vertex_t const *csrInd,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
jaccard_row_sum<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work);
cudaDeviceSynchronize();
fill(e, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
jaccard_is<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS});
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
jaccard_jw<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(e, weight_i, weight_s, weight_j);
return 0;
}
template <bool weighted, typename vertex_t, typename edge_t, typename weight_t>
int jaccard_pairs(vertex_t n,
edge_t num_pairs,
edge_t const *csrPtr,
vertex_t const *csrInd,
vertex_t const *first_pair,
vertex_t const *second_pair,
weight_t const *weight_in,
weight_t *work,
weight_t *weight_i,
weight_t *weight_s,
weight_t *weight_j)
{
dim3 nthreads, nblocks;
int y = 4;
// setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS});
nblocks.z = 1;
// launch kernel
jaccard_row_sum<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(n, csrPtr, csrInd, weight_in, work);
cudaDeviceSynchronize();
// NOTE: initilized weight_i vector with 0.0
// fill(num_pairs, weight_i, weight_t{0.0});
// setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1;
// launch kernel
jaccard_is_pairs<weighted, vertex_t, edge_t, weight_t><<<nblocks, nthreads>>>(
num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s);
// setup launch configuration
nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS});
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (edge_t)CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
// launch kernel
jaccard_jw<weighted, vertex_t, edge_t, weight_t>
<<<nblocks, nthreads>>>(num_pairs, weight_i, weight_s, weight_j);
return 0;
}
} // namespace detail
template <typename VT, typename ET, typename WT>
void jaccard(GraphCSRView<VT, ET, WT> const &graph, WT const *weights, WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL");
rmm::device_vector<WT> weight_i(graph.number_of_edges);
rmm::device_vector<WT> weight_s(graph.number_of_edges);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::jaccard<false, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::jaccard<true, VT, ET, WT>(graph.number_of_vertices,
graph.number_of_edges,
graph.offsets,
graph.indices,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template <typename VT, typename ET, typename WT>
void jaccard_list(GraphCSRView<VT, ET, WT> const &graph,
WT const *weights,
ET num_pairs,
VT const *first,
VT const *second,
WT *result)
{
CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL");
CUGRAPH_EXPECTS(first != nullptr, "Invalid input argument: first is NULL");
CUGRAPH_EXPECTS(second != nullptr, "Invalid input argument: second in NULL");
rmm::device_vector<WT> weight_i(num_pairs, WT{0.0});
rmm::device_vector<WT> weight_s(num_pairs);
rmm::device_vector<WT> work(graph.number_of_vertices);
if (weights == nullptr) {
cugraph::detail::jaccard_pairs<false, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
} else {
cugraph::detail::jaccard_pairs<true, VT, ET, WT>(graph.number_of_vertices,
num_pairs,
graph.offsets,
graph.indices,
first,
second,
weights,
work.data().get(),
weight_i.data().get(),
weight_s.data().get(),
result);
}
}
template void jaccard<int32_t, int32_t, float>(GraphCSRView<int32_t, int32_t, float> const &,
float const *,
float *);
template void jaccard<int32_t, int32_t, double>(GraphCSRView<int32_t, int32_t, double> const &,
double const *,
double *);
template void jaccard<int64_t, int64_t, float>(GraphCSRView<int64_t, int64_t, float> const &,
float const *,
float *);
template void jaccard<int64_t, int64_t, double>(GraphCSRView<int64_t, int64_t, double> const &,
double const *,
double *);
template void jaccard_list<int32_t, int32_t, float>(GraphCSRView<int32_t, int32_t, float> const &,
float const *,
int32_t,
int32_t const *,
int32_t const *,
float *);
template void jaccard_list<int32_t, int32_t, double>(GraphCSRView<int32_t, int32_t, double> const &,
double const *,
int32_t,
int32_t const *,
int32_t const *,
double *);
template void jaccard_list<int64_t, int64_t, float>(GraphCSRView<int64_t, int64_t, float> const &,
float const *,
int64_t,
int64_t const *,
int64_t const *,
float *);
template void jaccard_list<int64_t, int64_t, double>(GraphCSRView<int64_t, int64_t, double> const &,
double const *,
int64_t,
int64_t const *,
int64_t const *,
double *);
} // namespace cugraph
|
a1f0ecde46cef57a3875d26f355c13890b7e9dd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Device code for ICP computation
// Currently working only on performing rotation and translation using cuda
#ifndef _ICP_KERNEL_H_
#define _ICP_KERNEL_H_
#define TILE_WIDTH 256
#endif // #ifndef _ICP_KERNEL_H_
__global__ void CalculateDistanceIndexEachPoint(double point_x, double point_y, double point_z, double * data_x_d, double * data_y_d, double * data_z_d, int * bin_index_d, double * distance_d, int size_data)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
if(index < size_data)
{
distance_d[index] = sqrt(pow(data_x_d[index] - point_x,2) + pow(data_y_d[index] - point_y,2) + pow(data_z_d[index] - point_z,2));
bin_index_d[index] = index;
}
} | a1f0ecde46cef57a3875d26f355c13890b7e9dd7.cu | #include "includes.h"
// Device code for ICP computation
// Currently working only on performing rotation and translation using cuda
#ifndef _ICP_KERNEL_H_
#define _ICP_KERNEL_H_
#define TILE_WIDTH 256
#endif // #ifndef _ICP_KERNEL_H_
__global__ void CalculateDistanceIndexEachPoint(double point_x, double point_y, double point_z, double * data_x_d, double * data_y_d, double * data_z_d, int * bin_index_d, double * distance_d, int size_data)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
if(index < size_data)
{
distance_d[index] = sqrt(pow(data_x_d[index] - point_x,2) + pow(data_y_d[index] - point_y,2) + pow(data_z_d[index] - point_z,2));
bin_index_d[index] = index;
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.