hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
223c2c2f5160e2f5dcaea236a89e75107f3ba6ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_2D_2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernel_2D_2D), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_2D_2D), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_2D_2D), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 223c2c2f5160e2f5dcaea236a89e75107f3ba6ee.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_2D_2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_2D_2D<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_2D_2D<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_2D_2D<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4f1efc07a96956cf3e5d46684c3f72d82fba9a29.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 4f1efc07a96956cf3e5d46684c3f72d82fba9a29.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
723dbf4dd08d90a0220956f8b2377b0cae4d0c3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <time.h>
#include "cnn.h"
using namespace std;
static void CheckCudaErrorAux(const char *, unsigned, const char *,
hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
__device__ float non_linear(int type, float input_num)
{
float result = 0.0;
if (type == 0)
{
result = 1.0 / (1.0 + exp(0.0 - input_num));
}
return result;
}
__global__ void fc(
float* inputs,
float* outputs,
float* ws,
float* bs,
int input_w,
int output_w,
int lmethod)
{
int batch_id = blockIdx.x;
int output_id = blockIdx.y;
float* cur_input = inputs + batch_id * input_w;
float* cur_output = outputs + batch_id * output_w;
int idx = output_id * blockDim.x + threadIdx.x;
float cur_bs = bs[idx];
extern __shared__ float sm[];
for (int i = threadIdx.x; i < input_w; i += blockDim.x) {
sm[i] = cur_input[i];
}
__syncthreads();
if (idx < output_w) {
float sum = 0;
for (int i = 0; i < input_w; i++) {
sum += sm[i] * ws[i*output_w+idx];
}
cur_output[idx] = non_linear(lmethod,sum + cur_bs);
}
}
__global__ void pool_average(
float* inputs,
float* outputs,
int input_num,
int input_h,
int input_w,
int batch,
int kernel_h,
int kernel_w,
int trans_flag)
{
int batch_id = blockIdx.x;
int output_id = blockIdx.y;
int output_h = input_h/kernel_h;
int output_w = input_w/kernel_w;
float* cur_input = inputs + batch_id * input_num * input_h * input_w + output_id * input_h * input_w ;
float* cur_output = outputs + batch_id * input_num * output_h * output_w + output_id * output_h * output_w;
for (int i = 0; i < output_h * output_w; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < output_h * output_w) {
int tiy = ti / output_w;
int tix = ti % output_w;
float val = 0.0;
for (int h = 0; h < kernel_h; h++) {
int tmp_hid = (h + tiy * kernel_h) * input_w + tix * kernel_w;
for (int w = 0; w < kernel_w; w++) {
val += cur_input[tmp_hid + w];
}
}
int trans_tid = (trans_flag == 1) ? (tix * output_h + tiy) : ti;
cur_output[trans_tid] = val / (kernel_h * kernel_w);
}
}
}
__global__ void conv_shared(
float* inputs,
float* outputs,
float* ws,
float* bs,
int* k_index,
int* k_offset,
int input_num,
int input_h,
int input_w,
int output_num,
int output_h,
int output_w,
int batch,
int kernel_h,
int kernel_w,
int lmethod,
int stride)
{
int batch_id = blockIdx.x;
int output_id = blockIdx.y;
extern __shared__ float sm[];
int input_length = input_h * input_w;
float* sm_w = sm + input_length;
float* cur_input = inputs + batch_id * input_num * input_length;
float* cur_output = outputs + batch_id * output_num * output_h * output_w + output_id * output_h * output_w;
float* cur_ws = ws + k_offset[output_id] * kernel_h * kernel_w;
float cur_bs = bs[output_id];
int* cur_index = k_index + k_offset[output_id];
//load weights to shared memory
int ws_num = k_offset[output_id + 1] - k_offset[output_id];
int ws_length = ws_num * kernel_h * kernel_w;
for (int i = 0; i < ws_length; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < ws_length) {
sm_w[ti] = cur_ws[ti];
}
}
//initial shared memory of input data
for (int i = 0; i < input_length; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < input_length) {
sm[ti] = 0;
}
}
__syncthreads();
//convolution
for (int i = 0; i < output_h * output_w; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < output_h * output_w) {
int tiy = ti/output_w;
int tix = ti%output_w;
float val = 0.0;
for(int j=0;j<ws_num;j++){
int input_id = cur_index[j];
int tmp_wid = j*kernel_h*kernel_w;
// load input data to shared memory
for(int k=0;k<input_length;k += blockDim.x){
int tk = k+ threadIdx.x;
if(tk<input_length){
sm[tk] = cur_input[input_id*input_length+tk];
}
}
__syncthreads();
for(int h=0;h<kernel_h;h++){
int tmp_wid_h = tmp_wid + h*kernel_w;
int tmp_pid_h = (tiy*stride+h)*input_w + tix*stride;
for(int w=0;w<kernel_w;w++){
val += sm[tmp_pid_h+w] * sm_w[tmp_wid_h+w];
}
}
}
cur_output[ti] = non_linear(lmethod,val + cur_bs);
}
}
}
cnn::cnn(){
dev_data = NULL;
dev_weights = NULL;
layer_num = 0;
batch = 0;
input_h = INPUT_HEIGHT;
input_w = INPUT_WIDTH;
max_shared_memory_size = get_shared_memory();
kernel_time = 0;
cout<<"class cnn is created"<<endl;
}
size_t cnn::get_shared_memory() {
size_t sm_size = 0;
int dev_num = 0;
CUDA_CHECK_RETURN(hipGetDeviceCount(&dev_num));
if (dev_num > 0) {
hipSetDevice(0);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
sm_size = deviceProp.sharedMemPerBlock;
}
return sm_size;
}
cnn::~cnn(){
cout<<"class cnn is deleted"<<endl;
}
void cnn::load_mod(const char* file_path){
FILE *net_config;
net_config = fopen(file_path, "r");
fscanf(net_config, "%d", &layer_num);
dev_data = new nn_data[layer_num+1];
dev_weights = new nn_weights[layer_num];
dev_data[0].height = input_h;
dev_data[0].width = input_w;
dev_data[0].feature_num = 1;
for(int i=0;i<layer_num;i++){
int l_type = 0;
fscanf(net_config, "%d", &l_type);
dev_weights[i].layer_type = l_type;
if(l_type == LAYER_CONV){
//cout<<"read conv:"<<endl;
int flt_size, front_feature_size, flt_w, flt_h, lstride;
fscanf(net_config, "%d", &flt_size);
fscanf(net_config, "%d", &front_feature_size);
fscanf(net_config, "%d", &flt_w);
fscanf(net_config, "%d", &flt_h);
fscanf(net_config, "%d", &lstride);
dev_weights[i].kernel_w = flt_w;
dev_weights[i].kernel_h = flt_h;
dev_weights[i].stride = lstride;
dev_data[i+1].feature_num = flt_size;
dev_data[i+1].width = (dev_data[i].width - flt_w + 1)/lstride;
dev_data[i+1].height = (dev_data[i].height - flt_h + 1)/lstride;
dev_weights[i].method_type = LINEAR_SIGMOID;
float* tmp_w = (float*)malloc(sizeof(float) * flt_size*front_feature_size*flt_w*flt_h);
float* tmp_b = (float*)malloc(sizeof(float) * flt_size);
int* tmp_index = (int*)malloc(sizeof(int) * flt_size*front_feature_size);
int* tmp_offset = (int*)malloc(sizeof(int) * (flt_size+1));
int sum_index=0;
for(int j=0;j<flt_size;j++){
int flt_num = 0;
fscanf(net_config, "%d", &flt_num);
sum_index += flt_num;
if(j==0) tmp_offset[j] = 0;
tmp_offset[j+1] = tmp_offset[j] + flt_num;
for (int k = tmp_offset[j]; k < tmp_offset[j]+flt_num; k++){
fscanf(net_config, "%d", &tmp_index[k] );
for(int m = k*flt_w*flt_h; m< (k+1)*flt_w*flt_h;m++){
fscanf(net_config, "%f", &tmp_w[m] );
}
}
fscanf(net_config, "%f", &tmp_b[j]);
}
int total_rows = tmp_offset[flt_size];
CUDA_CHECK_RETURN(
hipMalloc((void ** )&dev_weights[i].weights,
sizeof(float) * total_rows * flt_w * flt_h));
CUDA_CHECK_RETURN(
hipMemcpy(dev_weights[i].weights, tmp_w,
sizeof(float) * total_rows * flt_w * flt_h,
hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(
hipMalloc((void ** )&dev_weights[i].bias,
sizeof(float) * flt_size));
CUDA_CHECK_RETURN(
hipMemcpy(dev_weights[i].bias, tmp_b,
sizeof(float) * flt_size, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(
hipMalloc((void ** )&dev_weights[i].kernel_index,
sizeof(int) * total_rows));
CUDA_CHECK_RETURN(
hipMemcpy(dev_weights[i].kernel_index, tmp_index,
sizeof(int) * total_rows, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(
hipMalloc((void ** )&dev_weights[i].kernel_offset,
sizeof(int) * (flt_size + 1)));
CUDA_CHECK_RETURN(
hipMemcpy(dev_weights[i].kernel_offset, tmp_offset,
sizeof(int) * (flt_size + 1),
hipMemcpyHostToDevice));
free(tmp_w);
free(tmp_b);
free(tmp_index);
free(tmp_offset);
}
else if(l_type == LAYER_FC){
//cout<<"read fc:"<<endl;
int input_num, output_num;
fscanf(net_config, "%d", &input_num);
fscanf(net_config, "%d", &output_num);
dev_data[i+1].feature_num = 1;
dev_data[i+1].width = output_num;
dev_data[i+1].height = 1;
dev_weights[i].kernel_h = input_num;
dev_weights[i].kernel_w = output_num;
dev_weights[i].method_type = LINEAR_SIGMOID;
dev_weights[i].stride = 1;
dev_weights[i].kernel_index = NULL;
dev_weights[i].kernel_offset = NULL;
float* tmp_w = (float*)malloc(sizeof(float) * input_num*output_num);
float* tmp_b = (float*)malloc(sizeof(float) * output_num);
for(int j=0;j<input_num*output_num;j++){
fscanf(net_config, "%f", &tmp_w[j]);
}
for(int j=0;j<output_num;j++){
fscanf(net_config, "%f", &tmp_b[j]);
}
CUDA_CHECK_RETURN(
hipMalloc((void ** )&dev_weights[i].weights,
sizeof(float) * input_num * output_num));
CUDA_CHECK_RETURN(
hipMemcpy(dev_weights[i].weights, tmp_w,
sizeof(float) * input_num * output_num,
hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(
hipMalloc((void ** )&dev_weights[i].bias,
sizeof(float) * output_num));
CUDA_CHECK_RETURN(
hipMemcpy(dev_weights[i].bias, tmp_b,
sizeof(float) * output_num,
hipMemcpyHostToDevice));
free(tmp_w);
free(tmp_b);
}
else if(l_type == LAYER_POOL){
//cout<<"read pool:"<<endl;
int lmethod = POOL_MAX;
int front_feature_size, width, height;
fscanf(net_config, "%d", &lmethod);
fscanf(net_config, "%d", &front_feature_size);
fscanf(net_config, "%d", &width);
fscanf(net_config, "%d", &height);
dev_data[i+1].feature_num = dev_data[i].feature_num;
dev_data[i+1].width = dev_data[i].width/width;
dev_data[i+1].height = dev_data[i].height/height;
dev_weights[i].kernel_h = width;
dev_weights[i].kernel_w = height;
dev_weights[i].method_type = lmethod;
dev_weights[i].stride = 0;
dev_weights[i].kernel_index = NULL;
dev_weights[i].kernel_offset = NULL;
dev_weights[i].weights = NULL;
dev_weights[i].bias = NULL;
}
else{
printf("Error: unknown layer type.");
}
}
fclose(net_config);
}
void cnn::load_input(const char* file_path){
FILE *input_data;
input_data = fopen(file_path, "r");
int input_width, input_height;
fscanf(input_data, "%d", &batch);
fscanf(input_data, "%d", &input_width);
fscanf(input_data, "%d", &input_height);
float *tmp = (float*)malloc(sizeof(float)*batch*input_width*input_height);
for(int i=0;i<batch*input_width*input_height;i++){
fscanf(input_data,"%f",&tmp[i]);
}
for(int i=0;i<layer_num+1;i++){
CUDA_CHECK_RETURN(
hipMalloc((void ** )&dev_data[i].data,
sizeof(float) * batch * dev_data[i].feature_num
* dev_data[i].height * dev_data[i].width));
}
CUDA_CHECK_RETURN(
hipMemcpy(dev_data[0].data, tmp,
sizeof(float) * batch * dev_data[0].feature_num
* dev_data[0].height * dev_data[0].width,
hipMemcpyHostToDevice));
free(tmp);
fclose(input_data);
}
void cnn::kernel_free(){
for(int i=0;i<layer_num;i++){
int t = dev_weights[i].layer_type;
if(t == LAYER_CONV){
CUDA_CHECK_RETURN(hipFree(dev_weights[i].bias));
CUDA_CHECK_RETURN(hipFree(dev_weights[i].weights));
CUDA_CHECK_RETURN(hipFree(dev_weights[i].kernel_offset));
CUDA_CHECK_RETURN(hipFree(dev_weights[i].kernel_index));
}
else if(t == LAYER_FC){
CUDA_CHECK_RETURN(hipFree(dev_weights[i].bias));
CUDA_CHECK_RETURN(hipFree(dev_weights[i].weights));
}
}
for(int i=0;i<layer_num+1;i++){
CUDA_CHECK_RETURN(hipFree(dev_data[i].data));
}
delete[] dev_weights;
delete[] dev_data;
}
void cnn::run(float *result){
//
// float kernel_time;
// hipEvent_t start1;
// hipEventCreate(&start1);
// hipEvent_t stop1;
// hipEventCreate(&stop1);
// hipEventRecord(start1, NULL);
for(int i=0;i<layer_num;i++){
int l_type = dev_weights[i].layer_type;
if(l_type == LAYER_CONV){
//cout<<"execution conv"<<endl;
int sm_size = sizeof(float) * (dev_data[i].height * dev_data[i].width
+ dev_data[i].feature_num * dev_weights[i].kernel_h * dev_weights[i].kernel_w);
//int trans_flag = (i < layer_num - 1 && dev_weights[i + 1].layer_type == LAYER_FC) ? 1 : 0;
if (sm_size < max_shared_memory_size) {
dim3 block = dim3(batch, dev_data[i + 1].feature_num);
int thread_num = ((dev_data[i + 1].height * dev_data[i + 1].width + MIN_THREADS_UNIT - 1) / MIN_THREADS_UNIT)
* MIN_THREADS_UNIT;
dim3 thread = dim3(thread_num < MAX_THREADS_PER_BLOCK ? thread_num : MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( conv_shared), dim3(block), dim3(thread), sm_size, 0,
dev_data[i].data,
dev_data[i + 1].data,
dev_weights[i].weights,
dev_weights[i].bias,
dev_weights[i].kernel_index,
dev_weights[i].kernel_offset,
dev_data[i].feature_num,
dev_data[i].height,
dev_data[i].width,
dev_data[i + 1].feature_num,
dev_data[i + 1].height,
dev_data[i + 1].width,
batch,
dev_weights[i].kernel_h,
dev_weights[i].kernel_w,
dev_weights[i].method_type,
dev_weights[i].stride);
}
else {
cout<<"Error: don't support too large input data"<<endl;
return;
}
}
else if(l_type == LAYER_POOL){
//cout<<"execution pool"<<endl;
int trans_flag = (i < layer_num - 1 && dev_weights[i + 1].layer_type == LAYER_FC) ? 1 : 0;
dim3 block = dim3(batch, dev_data[i + 1].feature_num);
int thread_num = ((dev_data[i + 1].height * dev_data[i + 1].width + MIN_THREADS_UNIT - 1) / MIN_THREADS_UNIT)
* MIN_THREADS_UNIT;
dim3 thread = dim3(thread_num < MAX_THREADS_PER_BLOCK ? thread_num : MAX_THREADS_PER_BLOCK);
if(dev_weights[i].method_type == POOL_AVERAGE){
hipLaunchKernelGGL(( pool_average), dim3(block),dim3(thread), 0, 0,
dev_data[i].data,
dev_data[i + 1].data,
dev_data[i].feature_num,
dev_data[i].height,
dev_data[i].width,
batch,
dev_weights[i].kernel_h,
dev_weights[i].kernel_w,
trans_flag);
}
if(trans_flag == 1){
dev_data[i+1].width = dev_data[i + 1].feature_num * dev_data[i + 1].height * dev_data[i + 1].width;
dev_data[i+1].feature_num = 1;
dev_data[i+1].height = 1;
//cout<<"after trans:"<< dev_data[i+1].width<<endl;
}
}
else if(l_type == LAYER_FC){
//cout << "execution fc" << endl;
int sm_size = sizeof(float) * dev_data[i].width;
if (sm_size < max_shared_memory_size) {
int thread_num = ((dev_data[i + 1].width + MIN_THREADS_UNIT - 1)
/ MIN_THREADS_UNIT) * MIN_THREADS_UNIT;
dim3 thread = dim3(
thread_num < MAX_THREADS_PER_BLOCK ?
thread_num : MAX_THREADS_PER_BLOCK);
dim3 block = dim3(batch,
thread_num < MAX_THREADS_PER_BLOCK ?
1 : (thread_num + MAX_THREADS_PER_BLOCK - 1)
/ MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( fc), dim3(block), dim3(thread), sm_size, 0,
dev_data[i].data,
dev_data[i + 1].data,
dev_weights[i].weights,
dev_weights[i].bias,
dev_data[i].width,
dev_data[i + 1].width,
dev_weights[i].method_type);
}
}
else{
printf("Error: unknown layer type.");
}
//used for debug:
// float *tmp_out = new float[batch * dev_data[i + 1].feature_num
// * dev_data[i + 1].height * dev_data[i + 1].width];
// CUDA_CHECK_RETURN(
// hipMemcpy(tmp_out, dev_data[i + 1].data,
// sizeof(float) * batch * dev_data[i + 1].feature_num
// * dev_data[i + 1].height
// * dev_data[i + 1].width,
// hipMemcpyDeviceToHost));
// int zz = 0;
// for (int t = 0; t < dev_data[i + 1].feature_num * batch; t++) {
// cout << "feature_map" << t << ":" << endl;
// for (int x = 0; x < dev_data[i + 1].height; x++) {
// for (int y = 0; y < dev_data[i + 1].width; y++) {
// cout << tmp_out[zz] << " ";
// zz++;
// }
// cout << endl;
// }
// }
// free(tmp_out);
//debug end;
}
// hipEventRecord(stop1, NULL);
// hipEventSynchronize(stop1);
// hipEventElapsedTime(&kernel_time, start1, stop1);
// hipEventDestroy(start1);
// hipEventDestroy(stop1);
// cout<<kernel_time<<endl;
CUDA_CHECK_RETURN(
hipMemcpy(result, dev_data[layer_num].data,
sizeof(float) * batch * dev_data[layer_num].feature_num
* dev_data[layer_num].height * dev_data[layer_num].width,
hipMemcpyDeviceToHost));
//cout<<batch<<endl;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux(const char *file, unsigned line,
const char *statement, hipError_t err) {
if (err == hipSuccess)
return;
std::cerr << statement << " returned " << hipGetErrorString(err) << "("
<< err << ") at " << file << ":" << line << std::endl;
exit(1);
}
| 723dbf4dd08d90a0220956f8b2377b0cae4d0c3e.cu | #include <iostream>
#include <fstream>
#include <stdlib.h>
#include <time.h>
#include "cnn.h"
using namespace std;
static void CheckCudaErrorAux(const char *, unsigned, const char *,
cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
__device__ float non_linear(int type, float input_num)
{
float result = 0.0;
if (type == 0)
{
result = 1.0 / (1.0 + exp(0.0 - input_num));
}
return result;
}
__global__ void fc(
float* inputs,
float* outputs,
float* ws,
float* bs,
int input_w,
int output_w,
int lmethod)
{
int batch_id = blockIdx.x;
int output_id = blockIdx.y;
float* cur_input = inputs + batch_id * input_w;
float* cur_output = outputs + batch_id * output_w;
int idx = output_id * blockDim.x + threadIdx.x;
float cur_bs = bs[idx];
extern __shared__ float sm[];
for (int i = threadIdx.x; i < input_w; i += blockDim.x) {
sm[i] = cur_input[i];
}
__syncthreads();
if (idx < output_w) {
float sum = 0;
for (int i = 0; i < input_w; i++) {
sum += sm[i] * ws[i*output_w+idx];
}
cur_output[idx] = non_linear(lmethod,sum + cur_bs);
}
}
__global__ void pool_average(
float* inputs,
float* outputs,
int input_num,
int input_h,
int input_w,
int batch,
int kernel_h,
int kernel_w,
int trans_flag)
{
int batch_id = blockIdx.x;
int output_id = blockIdx.y;
int output_h = input_h/kernel_h;
int output_w = input_w/kernel_w;
float* cur_input = inputs + batch_id * input_num * input_h * input_w + output_id * input_h * input_w ;
float* cur_output = outputs + batch_id * input_num * output_h * output_w + output_id * output_h * output_w;
for (int i = 0; i < output_h * output_w; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < output_h * output_w) {
int tiy = ti / output_w;
int tix = ti % output_w;
float val = 0.0;
for (int h = 0; h < kernel_h; h++) {
int tmp_hid = (h + tiy * kernel_h) * input_w + tix * kernel_w;
for (int w = 0; w < kernel_w; w++) {
val += cur_input[tmp_hid + w];
}
}
int trans_tid = (trans_flag == 1) ? (tix * output_h + tiy) : ti;
cur_output[trans_tid] = val / (kernel_h * kernel_w);
}
}
}
__global__ void conv_shared(
float* inputs,
float* outputs,
float* ws,
float* bs,
int* k_index,
int* k_offset,
int input_num,
int input_h,
int input_w,
int output_num,
int output_h,
int output_w,
int batch,
int kernel_h,
int kernel_w,
int lmethod,
int stride)
{
int batch_id = blockIdx.x;
int output_id = blockIdx.y;
extern __shared__ float sm[];
int input_length = input_h * input_w;
float* sm_w = sm + input_length;
float* cur_input = inputs + batch_id * input_num * input_length;
float* cur_output = outputs + batch_id * output_num * output_h * output_w + output_id * output_h * output_w;
float* cur_ws = ws + k_offset[output_id] * kernel_h * kernel_w;
float cur_bs = bs[output_id];
int* cur_index = k_index + k_offset[output_id];
//load weights to shared memory
int ws_num = k_offset[output_id + 1] - k_offset[output_id];
int ws_length = ws_num * kernel_h * kernel_w;
for (int i = 0; i < ws_length; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < ws_length) {
sm_w[ti] = cur_ws[ti];
}
}
//initial shared memory of input data
for (int i = 0; i < input_length; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < input_length) {
sm[ti] = 0;
}
}
__syncthreads();
//convolution
for (int i = 0; i < output_h * output_w; i += blockDim.x) {
int ti = i + threadIdx.x;
if (ti < output_h * output_w) {
int tiy = ti/output_w;
int tix = ti%output_w;
float val = 0.0;
for(int j=0;j<ws_num;j++){
int input_id = cur_index[j];
int tmp_wid = j*kernel_h*kernel_w;
// load input data to shared memory
for(int k=0;k<input_length;k += blockDim.x){
int tk = k+ threadIdx.x;
if(tk<input_length){
sm[tk] = cur_input[input_id*input_length+tk];
}
}
__syncthreads();
for(int h=0;h<kernel_h;h++){
int tmp_wid_h = tmp_wid + h*kernel_w;
int tmp_pid_h = (tiy*stride+h)*input_w + tix*stride;
for(int w=0;w<kernel_w;w++){
val += sm[tmp_pid_h+w] * sm_w[tmp_wid_h+w];
}
}
}
cur_output[ti] = non_linear(lmethod,val + cur_bs);
}
}
}
cnn::cnn(){
dev_data = NULL;
dev_weights = NULL;
layer_num = 0;
batch = 0;
input_h = INPUT_HEIGHT;
input_w = INPUT_WIDTH;
max_shared_memory_size = get_shared_memory();
kernel_time = 0;
cout<<"class cnn is created"<<endl;
}
size_t cnn::get_shared_memory() {
size_t sm_size = 0;
int dev_num = 0;
CUDA_CHECK_RETURN(cudaGetDeviceCount(&dev_num));
if (dev_num > 0) {
cudaSetDevice(0);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
sm_size = deviceProp.sharedMemPerBlock;
}
return sm_size;
}
cnn::~cnn(){
cout<<"class cnn is deleted"<<endl;
}
void cnn::load_mod(const char* file_path){
FILE *net_config;
net_config = fopen(file_path, "r");
fscanf(net_config, "%d", &layer_num);
dev_data = new nn_data[layer_num+1];
dev_weights = new nn_weights[layer_num];
dev_data[0].height = input_h;
dev_data[0].width = input_w;
dev_data[0].feature_num = 1;
for(int i=0;i<layer_num;i++){
int l_type = 0;
fscanf(net_config, "%d", &l_type);
dev_weights[i].layer_type = l_type;
if(l_type == LAYER_CONV){
//cout<<"read conv:"<<endl;
int flt_size, front_feature_size, flt_w, flt_h, lstride;
fscanf(net_config, "%d", &flt_size);
fscanf(net_config, "%d", &front_feature_size);
fscanf(net_config, "%d", &flt_w);
fscanf(net_config, "%d", &flt_h);
fscanf(net_config, "%d", &lstride);
dev_weights[i].kernel_w = flt_w;
dev_weights[i].kernel_h = flt_h;
dev_weights[i].stride = lstride;
dev_data[i+1].feature_num = flt_size;
dev_data[i+1].width = (dev_data[i].width - flt_w + 1)/lstride;
dev_data[i+1].height = (dev_data[i].height - flt_h + 1)/lstride;
dev_weights[i].method_type = LINEAR_SIGMOID;
float* tmp_w = (float*)malloc(sizeof(float) * flt_size*front_feature_size*flt_w*flt_h);
float* tmp_b = (float*)malloc(sizeof(float) * flt_size);
int* tmp_index = (int*)malloc(sizeof(int) * flt_size*front_feature_size);
int* tmp_offset = (int*)malloc(sizeof(int) * (flt_size+1));
int sum_index=0;
for(int j=0;j<flt_size;j++){
int flt_num = 0;
fscanf(net_config, "%d", &flt_num);
sum_index += flt_num;
if(j==0) tmp_offset[j] = 0;
tmp_offset[j+1] = tmp_offset[j] + flt_num;
for (int k = tmp_offset[j]; k < tmp_offset[j]+flt_num; k++){
fscanf(net_config, "%d", &tmp_index[k] );
for(int m = k*flt_w*flt_h; m< (k+1)*flt_w*flt_h;m++){
fscanf(net_config, "%f", &tmp_w[m] );
}
}
fscanf(net_config, "%f", &tmp_b[j]);
}
int total_rows = tmp_offset[flt_size];
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&dev_weights[i].weights,
sizeof(float) * total_rows * flt_w * flt_h));
CUDA_CHECK_RETURN(
cudaMemcpy(dev_weights[i].weights, tmp_w,
sizeof(float) * total_rows * flt_w * flt_h,
cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&dev_weights[i].bias,
sizeof(float) * flt_size));
CUDA_CHECK_RETURN(
cudaMemcpy(dev_weights[i].bias, tmp_b,
sizeof(float) * flt_size, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&dev_weights[i].kernel_index,
sizeof(int) * total_rows));
CUDA_CHECK_RETURN(
cudaMemcpy(dev_weights[i].kernel_index, tmp_index,
sizeof(int) * total_rows, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&dev_weights[i].kernel_offset,
sizeof(int) * (flt_size + 1)));
CUDA_CHECK_RETURN(
cudaMemcpy(dev_weights[i].kernel_offset, tmp_offset,
sizeof(int) * (flt_size + 1),
cudaMemcpyHostToDevice));
free(tmp_w);
free(tmp_b);
free(tmp_index);
free(tmp_offset);
}
else if(l_type == LAYER_FC){
//cout<<"read fc:"<<endl;
int input_num, output_num;
fscanf(net_config, "%d", &input_num);
fscanf(net_config, "%d", &output_num);
dev_data[i+1].feature_num = 1;
dev_data[i+1].width = output_num;
dev_data[i+1].height = 1;
dev_weights[i].kernel_h = input_num;
dev_weights[i].kernel_w = output_num;
dev_weights[i].method_type = LINEAR_SIGMOID;
dev_weights[i].stride = 1;
dev_weights[i].kernel_index = NULL;
dev_weights[i].kernel_offset = NULL;
float* tmp_w = (float*)malloc(sizeof(float) * input_num*output_num);
float* tmp_b = (float*)malloc(sizeof(float) * output_num);
for(int j=0;j<input_num*output_num;j++){
fscanf(net_config, "%f", &tmp_w[j]);
}
for(int j=0;j<output_num;j++){
fscanf(net_config, "%f", &tmp_b[j]);
}
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&dev_weights[i].weights,
sizeof(float) * input_num * output_num));
CUDA_CHECK_RETURN(
cudaMemcpy(dev_weights[i].weights, tmp_w,
sizeof(float) * input_num * output_num,
cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&dev_weights[i].bias,
sizeof(float) * output_num));
CUDA_CHECK_RETURN(
cudaMemcpy(dev_weights[i].bias, tmp_b,
sizeof(float) * output_num,
cudaMemcpyHostToDevice));
free(tmp_w);
free(tmp_b);
}
else if(l_type == LAYER_POOL){
//cout<<"read pool:"<<endl;
int lmethod = POOL_MAX;
int front_feature_size, width, height;
fscanf(net_config, "%d", &lmethod);
fscanf(net_config, "%d", &front_feature_size);
fscanf(net_config, "%d", &width);
fscanf(net_config, "%d", &height);
dev_data[i+1].feature_num = dev_data[i].feature_num;
dev_data[i+1].width = dev_data[i].width/width;
dev_data[i+1].height = dev_data[i].height/height;
dev_weights[i].kernel_h = width;
dev_weights[i].kernel_w = height;
dev_weights[i].method_type = lmethod;
dev_weights[i].stride = 0;
dev_weights[i].kernel_index = NULL;
dev_weights[i].kernel_offset = NULL;
dev_weights[i].weights = NULL;
dev_weights[i].bias = NULL;
}
else{
printf("Error: unknown layer type.");
}
}
fclose(net_config);
}
void cnn::load_input(const char* file_path){
	// Loads the batch of input samples from a text file and uploads them to the
	// GPU. File format: <batch> <width> <height> followed by
	// batch*width*height floats. Also allocates the device activation buffer
	// for every layer boundary (dev_data[0..layer_num]).
	// NOTE(review): assumes the network-config loader already filled the
	// dev_data[i] dimensions — confirm call order.
	FILE *input_data = fopen(file_path, "r");
	if (input_data == NULL) {
		// BUG FIX: fopen failure previously crashed on the first fscanf.
		printf("Error: cannot open input file %s.\n", file_path);
		return;
	}
	int input_width, input_height;
	// fscanf results are checked so a truncated/malformed file is reported
	// instead of silently uploading uninitialized host memory to the device.
	if (fscanf(input_data, "%d", &batch) != 1 ||
			fscanf(input_data, "%d", &input_width) != 1 ||
			fscanf(input_data, "%d", &input_height) != 1) {
		printf("Error: malformed input file header.\n");
		fclose(input_data);
		return;
	}
	float *tmp = (float*)malloc(sizeof(float) * batch * input_width * input_height);
	if (tmp == NULL) {
		printf("Error: host allocation failed.\n");
		fclose(input_data);
		return;
	}
	for (int i = 0; i < batch * input_width * input_height; i++) {
		if (fscanf(input_data, "%f", &tmp[i]) != 1) {
			printf("Error: input file ended early at value %d.\n", i);
			free(tmp);
			fclose(input_data);
			return;
		}
	}
	// One device buffer per layer boundary: dev_data[0] holds the input,
	// dev_data[layer_num] will hold the network output.
	for (int i = 0; i < layer_num + 1; i++) {
		CUDA_CHECK_RETURN(
				cudaMalloc((void ** )&dev_data[i].data,
						sizeof(float) * batch * dev_data[i].feature_num
								* dev_data[i].height * dev_data[i].width));
	}
	CUDA_CHECK_RETURN(
			cudaMemcpy(dev_data[0].data, tmp,
					sizeof(float) * batch * dev_data[0].feature_num
							* dev_data[0].height * dev_data[0].width,
					cudaMemcpyHostToDevice));
	free(tmp);
	fclose(input_data);
}
void cnn::kernel_free(){
	// Releases every device-side allocation, then the host descriptor arrays.
	for (int layer = 0; layer < layer_num; layer++) {
		const int kind = dev_weights[layer].layer_type;
		if (kind == LAYER_CONV) {
			CUDA_CHECK_RETURN(cudaFree(dev_weights[layer].bias));
			CUDA_CHECK_RETURN(cudaFree(dev_weights[layer].weights));
			CUDA_CHECK_RETURN(cudaFree(dev_weights[layer].kernel_offset));
			CUDA_CHECK_RETURN(cudaFree(dev_weights[layer].kernel_index));
		} else if (kind == LAYER_FC) {
			CUDA_CHECK_RETURN(cudaFree(dev_weights[layer].bias));
			CUDA_CHECK_RETURN(cudaFree(dev_weights[layer].weights));
		}
		// Pooling layers own no device weight storage, so nothing to free.
	}
	// Activation buffers exist for every layer boundary: 0 .. layer_num inclusive.
	for (int idx = 0; idx <= layer_num; idx++) {
		CUDA_CHECK_RETURN(cudaFree(dev_data[idx].data));
	}
	delete[] dev_weights;
	delete[] dev_data;
}
void cnn::run(float *result){
// Runs one forward pass through all layer_num layers on the device and copies
// the final layer's activations into the host buffer `result`, which must
// hold batch * feature_num * height * width floats of dev_data[layer_num].
// NOTE(review): kernel launches are never error-checked; consider calling
// cudaGetLastError() after each launch.
//
// float kernel_time;
// cudaEvent_t start1;
// cudaEventCreate(&start1);
// cudaEvent_t stop1;
// cudaEventCreate(&stop1);
// cudaEventRecord(start1, NULL);
for(int i=0;i<layer_num;i++){
int l_type = dev_weights[i].layer_type;
if(l_type == LAYER_CONV){
//cout<<"execution conv"<<endl;
// Shared-memory budget: one input feature map plus one kernel per input
// feature (per the formula below); skip the layer if over the device limit.
int sm_size = sizeof(float) * (dev_data[i].height * dev_data[i].width
+ dev_data[i].feature_num * dev_weights[i].kernel_h * dev_weights[i].kernel_w);
//int trans_flag = (i < layer_num - 1 && dev_weights[i + 1].layer_type == LAYER_FC) ? 1 : 0;
if (sm_size < max_shared_memory_size) {
// Grid: one block per (batch sample, output feature map).
dim3 block = dim3(batch, dev_data[i + 1].feature_num);
// Threads per block: output pixel count rounded up to MIN_THREADS_UNIT,
// capped at MAX_THREADS_PER_BLOCK.
int thread_num = ((dev_data[i + 1].height * dev_data[i + 1].width + MIN_THREADS_UNIT - 1) / MIN_THREADS_UNIT)
* MIN_THREADS_UNIT;
dim3 thread = dim3(thread_num < MAX_THREADS_PER_BLOCK ? thread_num : MAX_THREADS_PER_BLOCK);
conv_shared<<<block, thread, sm_size>>>(
dev_data[i].data,
dev_data[i + 1].data,
dev_weights[i].weights,
dev_weights[i].bias,
dev_weights[i].kernel_index,
dev_weights[i].kernel_offset,
dev_data[i].feature_num,
dev_data[i].height,
dev_data[i].width,
dev_data[i + 1].feature_num,
dev_data[i + 1].height,
dev_data[i + 1].width,
batch,
dev_weights[i].kernel_h,
dev_weights[i].kernel_w,
dev_weights[i].method_type,
dev_weights[i].stride);
}
else {
cout<<"Error: don't support too large input data"<<endl;
return;
}
}
else if(l_type == LAYER_POOL){
//cout<<"execution pool"<<endl;
// trans_flag=1 when the next layer is fully connected: the pooled output
// must then be flattened to a 1-D vector (done below after the launch).
int trans_flag = (i < layer_num - 1 && dev_weights[i + 1].layer_type == LAYER_FC) ? 1 : 0;
dim3 block = dim3(batch, dev_data[i + 1].feature_num);
int thread_num = ((dev_data[i + 1].height * dev_data[i + 1].width + MIN_THREADS_UNIT - 1) / MIN_THREADS_UNIT)
* MIN_THREADS_UNIT;
dim3 thread = dim3(thread_num < MAX_THREADS_PER_BLOCK ? thread_num : MAX_THREADS_PER_BLOCK);
if(dev_weights[i].method_type == POOL_AVERAGE){
pool_average<<<block,thread>>>(
dev_data[i].data,
dev_data[i + 1].data,
dev_data[i].feature_num,
dev_data[i].height,
dev_data[i].width,
batch,
dev_weights[i].kernel_h,
dev_weights[i].kernel_w,
trans_flag);
}
// NOTE(review): only POOL_AVERAGE is dispatched here; a POOL_MAX layer
// (accepted by the config loader) would leave dev_data[i+1].data
// unwritten — confirm whether max pooling is supposed to be supported.
if(trans_flag == 1){
// Flatten (features x H x W) into a single row so the next FC layer can
// treat the activations as one vector of length `width`.
dev_data[i+1].width = dev_data[i + 1].feature_num * dev_data[i + 1].height * dev_data[i + 1].width;
dev_data[i+1].feature_num = 1;
dev_data[i+1].height = 1;
//cout<<"after trans:"<< dev_data[i+1].width<<endl;
}
}
else if(l_type == LAYER_FC){
//cout << "execution fc" << endl;
// Shared memory caches the whole input vector for the block.
int sm_size = sizeof(float) * dev_data[i].width;
if (sm_size < max_shared_memory_size) {
int thread_num = ((dev_data[i + 1].width + MIN_THREADS_UNIT - 1)
/ MIN_THREADS_UNIT) * MIN_THREADS_UNIT;
dim3 thread = dim3(
thread_num < MAX_THREADS_PER_BLOCK ?
thread_num : MAX_THREADS_PER_BLOCK);
// block.y spreads the output neurons across blocks when one block's
// thread count cannot cover them all.
dim3 block = dim3(batch,
thread_num < MAX_THREADS_PER_BLOCK ?
1 : (thread_num + MAX_THREADS_PER_BLOCK - 1)
/ MAX_THREADS_PER_BLOCK);
fc<<<block, thread, sm_size>>>(
dev_data[i].data,
dev_data[i + 1].data,
dev_weights[i].weights,
dev_weights[i].bias,
dev_data[i].width,
dev_data[i + 1].width,
dev_weights[i].method_type);
}
// NOTE(review): unlike the conv branch, an oversized FC layer is silently
// skipped (no message, no return) — confirm this is intentional.
}
else{
printf("Error: unknown layer type.");
}
//used for debug:
// float *tmp_out = new float[batch * dev_data[i + 1].feature_num
// * dev_data[i + 1].height * dev_data[i + 1].width];
// CUDA_CHECK_RETURN(
// cudaMemcpy(tmp_out, dev_data[i + 1].data,
// sizeof(float) * batch * dev_data[i + 1].feature_num
// * dev_data[i + 1].height
// * dev_data[i + 1].width,
// cudaMemcpyDeviceToHost));
// int zz = 0;
// for (int t = 0; t < dev_data[i + 1].feature_num * batch; t++) {
// cout << "feature_map" << t << ":" << endl;
// for (int x = 0; x < dev_data[i + 1].height; x++) {
// for (int y = 0; y < dev_data[i + 1].width; y++) {
// cout << tmp_out[zz] << " ";
// zz++;
// }
// cout << endl;
// }
// }
// free(tmp_out);
//debug end;
}
// cudaEventRecord(stop1, NULL);
// cudaEventSynchronize(stop1);
// cudaEventElapsedTime(&kernel_time, start1, stop1);
// cudaEventDestroy(start1);
// cudaEventDestroy(stop1);
// cout<<kernel_time<<endl;
// Blocking copy of the final activations back to the host; this also
// synchronizes with all queued kernels above.
CUDA_CHECK_RETURN(
cudaMemcpy(result, dev_data[layer_num].data,
sizeof(float) * batch * dev_data[layer_num].feature_num
* dev_data[layer_num].height * dev_data[layer_num].width,
cudaMemcpyDeviceToHost));
//cout<<batch<<endl;
}
/**
 * Aborts the program with a diagnostic when a CUDA runtime call fails.
 * Invoked through the CUDA_CHECK_RETURN macro, which supplies the call site
 * (file/line) and the stringified statement for the message; a cudaSuccess
 * result is a no-op.
 */
static void CheckCudaErrorAux(const char *file, unsigned line,
		const char *statement, cudaError_t err) {
	if (err != cudaSuccess) {
		std::cerr << statement << " returned " << cudaGetErrorString(err) << "("
				<< err << ") at " << file << ":" << line << std::endl;
		exit(1);
	}
}
|
f45d352ac99e276d3a2d8004a352b6f85258f9c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: transform_precoder
% Description: perform transform precoding on complex data after mapper
% Inputs: *symbols_R_h: Real part of the symbols
% Inputs: *symbols_I_h: Imag part of the symbols
% M_pusch_rb numer of resource blocks assigned to ue
% Outputs: *precoded_data transform precodded data
By: Ahmad Nour & Mohammed Mostafa
*/
/*
coeff_multiply kernel just multiples the output symbols by a coeff. The kernel's overhead can be avoided if we
merged it with the mapper kernel
*/
#include "transform_precoder.cuh"
// Scales both components of each complex symbol in place by a constant
// factor. One thread per element; extra threads in the last block fall
// through the bounds check and do nothing.
__global__ void coeff_multiply(hipfftComplex* symbols_d, double coeff, int numThreads) {
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	if (tid < numThreads) {
		symbols_d[tid].x *= coeff;
		symbols_d[tid].y *= coeff;
	}
}
// Applies LTE transform precoding: a forward M_pusch_sc-point C2C FFT over
// each contiguous group of M_pusch_sc mapped symbols, scaled by
// 1/sqrt(M_pusch_sc). Allocates *precoded_data_h on the host; the caller
// owns and must free it.
void transform_precoder(hipfftComplex* symbols_h, const int M_pusch_rb, int signal_size, hipfftComplex** precoded_data_h)
{
	int M_pusch_sc = N_sc_rb * M_pusch_rb;
	//For timing purpose (elapsed/start/stop are used by the timer macros)
	float elapsed = 0; //For time calc.
	hipEvent_t start, stop;
	//Device data
	hipfftComplex* symbols_d;
	hipfftComplex* precoded_data_d;
	//Host data allocation
	*precoded_data_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*signal_size);
	//Device data allocation
	startTimer();
	hipMalloc((void **)&symbols_d, sizeof(hipfftComplex)*signal_size);
	hipMalloc((void **)&precoded_data_d, sizeof(hipfftComplex)*signal_size);
	stopTimer("hipMalloc Time= %.6f ms\n", elapsed);
	//Copying data to device
	startTimer();
	hipMemcpy(symbols_d, symbols_h, sizeof(hipfftComplex)*signal_size, hipMemcpyHostToDevice);
	stopTimer("hipMemcpy Host->Device Time= %.6f ms\n", elapsed);
	// FFT plan: N_SIGS batched 1-D transforms of length M_pusch_sc, laid out
	// contiguously (stride 1, distance M_pusch_sc between successive signals).
	int N_SIGS = signal_size / M_pusch_sc;
	int n[1] = { M_pusch_sc };
	hipfftHandle plan;
	hipfftPlanMany(&plan, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, HIPFFT_C2C, N_SIGS);
	hipfftExecC2C(plan, symbols_d, precoded_data_d, HIPFFT_FORWARD);
	//Calc. number of needed threads for calling kernel(s)
	int numThreads = signal_size;
	int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread)
	int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in blocks (min 1)
	//Coeff. multiplication: normalize the FFT output by 1/sqrt(M_pusch_sc)
	coeff_multiply << <gridDim, blockDim >> > (precoded_data_d, rsqrtf(M_pusch_sc), numThreads);
	//Retrieve data from device (blocking copy also synchronizes FFT + kernel)
	startTimer();
	hipMemcpy(*precoded_data_h, precoded_data_d, sizeof(hipfftComplex)*signal_size, hipMemcpyDeviceToHost);
	stopTimer("hipMemcpy Device->Host Time= %.6f ms\n", elapsed);
	// Cleanup
	// BUG FIX: the FFT plan was never destroyed, leaking plan-internal device
	// memory on every call.
	hipfftDestroy(plan);
	hipFree(symbols_d);
	hipFree(precoded_data_d);
	//Destroy timers
	destroyTimers();
}
| f45d352ac99e276d3a2d8004a352b6f85258f9c1.cu | /*
% Function: transform_precoder
% Description: perform transform precoding on complex data after mapper
% Inputs: *symbols_R_h: Real part of the symbols
% Inputs: *symbols_I_h: Imag part of the symbols
% M_pusch_rb numer of resource blocks assigned to ue
% Outputs: *precoded_data transform precodded data
By: Ahmad Nour & Mohammed Mostafa
*/
/*
coeff_multiply kernel just multiples the output symbols by a coeff. The kernel's overhead can be avoided if we
merged it with the mapper kernel
*/
#include "transform_precoder.cuh"
// Scales both components of each complex symbol in place by a constant
// factor. One thread per element; extra threads in the last block fall
// through the bounds check and do nothing.
__global__ void coeff_multiply(cufftComplex* symbols_d, double coeff, int numThreads) {
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	if (tid < numThreads) {
		symbols_d[tid].x *= coeff;
		symbols_d[tid].y *= coeff;
	}
}
// Applies LTE transform precoding: a forward M_pusch_sc-point C2C FFT over
// each contiguous group of M_pusch_sc mapped symbols, scaled by
// 1/sqrt(M_pusch_sc). Allocates *precoded_data_h on the host; the caller
// owns and must free it.
void transform_precoder(cufftComplex* symbols_h, const int M_pusch_rb, int signal_size, cufftComplex** precoded_data_h)
{
	int M_pusch_sc = N_sc_rb * M_pusch_rb;
	//For timing purpose (elapsed/start/stop are used by the timer macros)
	float elapsed = 0; //For time calc.
	cudaEvent_t start, stop;
	//Device data
	cufftComplex* symbols_d;
	cufftComplex* precoded_data_d;
	//Host data allocation
	*precoded_data_h = (cufftComplex *)malloc(sizeof(cufftComplex)*signal_size);
	//Device data allocation
	startTimer();
	cudaMalloc((void **)&symbols_d, sizeof(cufftComplex)*signal_size);
	cudaMalloc((void **)&precoded_data_d, sizeof(cufftComplex)*signal_size);
	stopTimer("cudaMalloc Time= %.6f ms\n", elapsed);
	//Copying data to device
	startTimer();
	cudaMemcpy(symbols_d, symbols_h, sizeof(cufftComplex)*signal_size, cudaMemcpyHostToDevice);
	stopTimer("cudaMemcpy Host->Device Time= %.6f ms\n", elapsed);
	// CUFFT plan: N_SIGS batched 1-D transforms of length M_pusch_sc, laid out
	// contiguously (stride 1, distance M_pusch_sc between successive signals).
	int N_SIGS = signal_size / M_pusch_sc;
	int n[1] = { M_pusch_sc };
	cufftHandle plan;
	cufftPlanMany(&plan, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, CUFFT_C2C, N_SIGS);
	cufftExecC2C(plan, symbols_d, precoded_data_d, CUFFT_FORWARD);
	//Calc. number of needed threads for calling kernel(s)
	int numThreads = signal_size;
	int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread)
	int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in blocks (min 1)
	//Coeff. multiplication: normalize the FFT output by 1/sqrt(M_pusch_sc)
	coeff_multiply << <gridDim, blockDim >> > (precoded_data_d, rsqrtf(M_pusch_sc), numThreads);
	//Retrieve data from device (blocking copy also synchronizes FFT + kernel)
	startTimer();
	cudaMemcpy(*precoded_data_h, precoded_data_d, sizeof(cufftComplex)*signal_size, cudaMemcpyDeviceToHost);
	stopTimer("cudaMemcpy Device->Host Time= %.6f ms\n", elapsed);
	// Cleanup
	// BUG FIX: the cuFFT plan was never destroyed, leaking plan-internal
	// device memory on every call.
	cufftDestroy(plan);
	cudaFree(symbols_d);
	cudaFree(precoded_data_d);
	//Destroy timers
	destroyTimers();
}
|
050ba6e4f895111ff159cb68883553969ad62fd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ****************************************
Requirements: CUDA enabled GPU installed and NVIDIA driver installed
Input: a 320x240 ppm image file given in command line as argument
Output: Image processed ppm file. This looks sharper.
How: This program uses the same algorithm as found in Prof Siewert's "sharpen.c" code for the image processing part.
The computation is performed in the GP-GPU.
Aurhor: Adnan Reza
Credit: Sam Siewert for the original image processing code segment.
****************************************** */
#include<cutil_inline.h>
#include<cuda_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#include<stdint.h>
#include<unistd.h>
#include<sys/types.h>
#include<unistd.h>
#include<fcntl.h>
#include<sys/time.h>
#include<errno.h>
#include<time.h>
#define nROW (240)
#define nCOL (320)
#define SIZE (nROW*nCOL)
#define USE_ROCM
// Host memory for image pixels
uint8_t r[SIZE];
uint8_t g[SIZE];
uint8_t b[SIZE];
uint8_t sharpR[SIZE];
uint8_t sharpG[SIZE];
uint8_t sharpB[SIZE];
// Device memory for image pixels
uint8_t *rdev; size_t rdevp;
uint8_t *gdev; size_t gdevp;
uint8_t *bdev; size_t bdevp;
uint8_t *sharpRdev; size_t sharpRdevp;
uint8_t *sharpGdev; size_t sharpGdevp;
uint8_t *sharpBdev; size_t sharpBdevp;
struct timeval tv_start, tv_end;
#ifdef USE_ROCM
// The CUDA Kernel to perform the computations
// Sharpens one pixel per thread with a 3x3 point-spread function (strength K).
// Expects a 2D launch covering the nCOL x nROW image; pitches (rP..newBp) are
// the pitched-row widths — in bytes, which equals elements here since each
// channel is uint8_t. Border pixels are copied through unchanged.
__global__ void image_process(uint8_t* r, uint8_t* g, uint8_t* b, size_t rP, size_t gP, size_t bP
		,uint8_t* newR, uint8_t* newG, uint8_t* newB, size_t newRp, size_t newGp, size_t newBp){
	double K=4.0;
	double PSF[9] = {-K/8.0, -K/8.0, -K/8.0, -K/8.0, K+1.0, -K/8.0, -K/8.0, -K/8.0, -K/8.0};
	int th_row=blockIdx.y*blockDim.y+threadIdx.y;
	int th_col=blockIdx.x*blockDim.x+threadIdx.x;
	int i=th_row; int j=th_col;
	// Guard: the stores below are unconditional, so refuse out-of-image
	// threads if the grid ever over-covers the image.
	if (th_row >= nROW || th_col >= nCOL)
		return;
	newR[(i*newRp)+j]=(uint8_t)0;
	newG[(i*newGp)+j]=(uint8_t)0;
	newB[(i*newBp)+j]=(uint8_t)0;
	if(0<th_row && nROW-1>th_row && 0<th_col && nCOL-1>th_col){
		// BUG FIX: the accumulator was declared int, so every "+=" truncated
		// its floating-point PSF term toward zero before summing, corrupting
		// the convolution. Accumulate in float and clamp once per channel.
		float temp;
		temp = 0.0f;
		temp += (PSF[0] * (float)r[((i-1)*rP)+j-1]);
		temp += (PSF[1] * (float)r[((i-1)*rP)+j]);
		temp += (PSF[2] * (float)r[((i-1)*rP)+j+1]);
		temp += (PSF[3] * (float)r[((i)*rP)+j-1]);
		temp += (PSF[4] * (float)r[((i)*rP)+j]);
		temp += (PSF[5] * (float)r[((i)*rP)+j+1]);
		temp += (PSF[6] * (float)r[((i+1)*rP)+j-1]);
		temp += (PSF[7] * (float)r[((i+1)*rP)+j]);
		temp += (PSF[8] * (float)r[((i+1)*rP)+j+1]);
		if(temp<0.0f) temp=0.0f;
		if(temp>255.0f) temp=255.0f;
		newR[(i*newRp)+j]=(uint8_t)temp;
		temp = 0.0f;
		temp += (PSF[0] * (float)g[((i-1)*gP)+j-1]);
		temp += (PSF[1] * (float)g[((i-1)*gP)+j]);
		temp += (PSF[2] * (float)g[((i-1)*gP)+j+1]);
		temp += (PSF[3] * (float)g[((i)*gP)+j-1]);
		temp += (PSF[4] * (float)g[((i)*gP)+j]);
		temp += (PSF[5] * (float)g[((i)*gP)+j+1]);
		temp += (PSF[6] * (float)g[((i+1)*gP)+j-1]);
		temp += (PSF[7] * (float)g[((i+1)*gP)+j]);
		temp += (PSF[8] * (float)g[((i+1)*gP)+j+1]);
		if(temp<0.0f) temp=0.0f;
		if(temp>255.0f) temp=255.0f;
		newG[(i*newGp)+j]=(uint8_t)temp;
		temp = 0.0f;
		temp += (PSF[0] * (float)b[((i-1)*bP)+j-1]);
		temp += (PSF[1] * (float)b[((i-1)*bP)+j]);
		temp += (PSF[2] * (float)b[((i-1)*bP)+j+1]);
		temp += (PSF[3] * (float)b[((i)*bP)+j-1]);
		temp += (PSF[4] * (float)b[((i)*bP)+j]);
		temp += (PSF[5] * (float)b[((i)*bP)+j+1]);
		temp += (PSF[6] * (float)b[((i+1)*bP)+j-1]);
		temp += (PSF[7] * (float)b[((i+1)*bP)+j]);
		temp += (PSF[8] * (float)b[((i+1)*bP)+j+1]);
		if(temp<0.0f) temp=0.0f;
		if(temp>255.0f) temp=255.0f;
		newB[(i*newBp)+j]=(uint8_t)temp;
	}
	else {
		// Edge pixel: no full 3x3 neighborhood — pass through unchanged.
		newR[(i*newRp)+j]=(uint8_t)r[(i*rP)+j];
		newG[(i*newGp)+j]=(uint8_t)g[(i*gP)+j];
		newB[(i*newBp)+j]=(uint8_t)b[(i*bP)+j];
	}
}
#endif
int main(int argc, char *argv[]){
// Reads a 320x240 binary PPM (fixed 38-byte header assumed), sharpens it on
// the GPU via image_process, and writes "sharpened.ppm". Prints the elapsed
// GPU-path time (H2D copy + kernel + D2H copy) in milliseconds.
// NOTE(review): read()/write() return values are unchecked, and the pitch
// printf below passes size_t values to "%d" (should be "%zu") — worth fixing.
int i,j,test;
char infilename[128];
int infd,outfd;
char ppm_header[128];
#ifdef USE_ROCM
hipError_t cuda_ret;
// 80x60 blocks of 4x4 threads = exactly 320x240 threads, one per pixel.
dim3 mainGrid(80,60); dim3 rowBlock(4,4);
#endif
if(2!=argc){
printf("Usage:: ./filename imagefile.ppm\nExit\n");
return -1;
}
//printf("size of uint8_t is %d\n",sizeof(uint8_t));
sprintf(infilename,"%s",argv[1]);
infd=open(infilename, O_RDONLY,0644);
if(0>infd){
perror("ERROR opening file");
exit(-1);
}
// Output file; NOTE(review): outfd is not checked for open failure.
outfd=open("sharpened.ppm",(O_RDWR | O_CREAT),0666);
// Copy the PPM header verbatim (38 bytes for this fixed 320x240 format).
read(infd, ppm_header,38);
ppm_header[38]='\0';
//printf("HEADER is %s",ppm_header);
// Read the image
// One (R,G,B) byte-triple per pixel into separate channel arrays (SoA).
for(i=0;i<SIZE;i++){
read(infd,(void*)&r[i],1);
read(infd,(void*)&g[i],1);
read(infd,(void*)&b[i],1);
}
close(infd);
#ifdef USE_ROCM
// Allocate Device memory
// Pitched allocations keep each 320-byte row aligned; pitch is in bytes,
// which equals elements here since the channels are uint8_t.
//printf("cuda_ret=%d; success=%d; error=%d\n",cuda_ret,hipSuccess,hipErrorMemoryAllocation);
cuda_ret=hipMallocPitch(&rdev,&rdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==hipSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==hipErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=hipMallocPitch(&gdev,&gdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==hipSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==hipErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=hipMallocPitch(&bdev,&bdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==hipSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==hipErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=hipMallocPitch(&sharpRdev,&sharpRdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==hipSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==hipErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=hipMallocPitch(&sharpGdev,&sharpGdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==hipSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==hipErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=hipMallocPitch(&sharpBdev,&sharpBdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==hipSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==hipErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
printf("Pitch sizes: %d %d %d %d %d %d\n",rdevp,gdevp,bdevp,sharpRdevp,sharpGdevp,sharpBdevp);
// Wall-clock timing starts here (covers copies + kernel).
gettimeofday(&tv_start,NULL);
// Copy from host to device memory
cuda_ret=hipMemcpy2D((void*)rdev,rdevp,(const void*)r,nCOL*sizeof(uint8_t),nCOL*sizeof(uint8_t),nROW,hipMemcpyHostToDevice);
//printf("hipMemcpy2D returns=%d\n");
cuda_ret=hipMemcpy2D((void*)gdev,gdevp,(const void*)g,nCOL*sizeof(uint8_t),nCOL*sizeof(uint8_t),nROW,hipMemcpyHostToDevice);
//printf("hipMemcpy2D returns=%d\n");
cuda_ret=hipMemcpy2D((void*)bdev,bdevp,(const void*)b,nCOL*sizeof(uint8_t),nCOL*sizeof(uint8_t),nROW,hipMemcpyHostToDevice);
//printf("hipMemcpy2D returns=%d\n");
printf("Host to device copy .. done\n");
hipLaunchKernelGGL(( image_process), dim3(mainGrid),dim3(rowBlock), 0, 0, rdev, gdev, bdev, rdevp, gdevp, bdevp
, sharpRdev, sharpGdev, sharpBdev, sharpRdevp, sharpGdevp, sharpBdevp);
// Wait for the kernel before the D2H copies below.
hipDeviceSynchronize();
// Copy processed RBG data from device to host memory
hipMemcpy2D((void*)sharpR,nCOL*sizeof(uint8_t),(const void*)sharpRdev,sharpRdevp,nCOL*sizeof(uint8_t),nROW,hipMemcpyDeviceToHost);
hipMemcpy2D((void*)sharpG,nCOL*sizeof(uint8_t),(const void*)sharpGdev,sharpGdevp,nCOL*sizeof(uint8_t),nROW,hipMemcpyDeviceToHost);
hipMemcpy2D((void*)sharpB,nCOL*sizeof(uint8_t),(const void*)sharpBdev,sharpBdevp,nCOL*sizeof(uint8_t),nROW,hipMemcpyDeviceToHost);
gettimeofday(&tv_end,NULL);
#endif
/* test=0;
for(i=0;i<nROW;i++){
test+=sharpR[i];
}
printf("SUM of R's is=%d\n",test);
*/
// Write the unchanged header followed by the sharpened interleaved pixels.
write(outfd, (void *)ppm_header, 38);
for(i=0; i<SIZE; i++)
{
write(outfd, (void *)&sharpR[i], 1);
write(outfd, (void *)&sharpG[i], 1);
write(outfd, (void *)&sharpB[i], 1);
}
close(outfd);
#ifdef USE_ROCM
hipFree(rdev);
hipFree(gdev);
hipFree(bdev);
hipFree(sharpRdev);
hipFree(sharpGdev);
hipFree(sharpBdev);
hipDeviceReset();
#endif
printf("Time elapsed= %f ms\n",(1000000*tv_end.tv_sec+tv_end.tv_usec-1000000*tv_start.tv_sec-tv_start.tv_usec)/1000.0);
return 0;
}
| 050ba6e4f895111ff159cb68883553969ad62fd0.cu | /* ****************************************
Requirements: CUDA enabled GPU installed and NVIDIA driver installed
Input: a 320x240 ppm image file given in command line as argument
Output: Image processed ppm file. This looks sharper.
How: This program uses the same algorithm as found in Prof Siewert's "sharpen.c" code for the image processing part.
The computation is performed in the GP-GPU.
Aurhor: Adnan Reza
Credit: Sam Siewert for the original image processing code segment.
****************************************** */
#include<cutil_inline.h>
#include<cuda_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#include<stdint.h>
#include<unistd.h>
#include<sys/types.h>
#include<unistd.h>
#include<fcntl.h>
#include<sys/time.h>
#include<errno.h>
#include<time.h>
#define nROW (240)
#define nCOL (320)
#define SIZE (nROW*nCOL)
#define USE_CUDA
// Host memory for image pixels
uint8_t r[SIZE];
uint8_t g[SIZE];
uint8_t b[SIZE];
uint8_t sharpR[SIZE];
uint8_t sharpG[SIZE];
uint8_t sharpB[SIZE];
// Device memory for image pixels
uint8_t *rdev; size_t rdevp;
uint8_t *gdev; size_t gdevp;
uint8_t *bdev; size_t bdevp;
uint8_t *sharpRdev; size_t sharpRdevp;
uint8_t *sharpGdev; size_t sharpGdevp;
uint8_t *sharpBdev; size_t sharpBdevp;
struct timeval tv_start, tv_end;
#ifdef USE_CUDA
// The CUDA Kernel to perform the computations
// Sharpens one pixel per thread with a 3x3 point-spread function (strength K).
// Expects a 2D launch covering the nCOL x nROW image; pitches (rP..newBp) are
// the pitched-row widths — in bytes, which equals elements here since each
// channel is uint8_t. Border pixels are copied through unchanged.
__global__ void image_process(uint8_t* r, uint8_t* g, uint8_t* b, size_t rP, size_t gP, size_t bP
		,uint8_t* newR, uint8_t* newG, uint8_t* newB, size_t newRp, size_t newGp, size_t newBp){
	double K=4.0;
	double PSF[9] = {-K/8.0, -K/8.0, -K/8.0, -K/8.0, K+1.0, -K/8.0, -K/8.0, -K/8.0, -K/8.0};
	int th_row=blockIdx.y*blockDim.y+threadIdx.y;
	int th_col=blockIdx.x*blockDim.x+threadIdx.x;
	int i=th_row; int j=th_col;
	// Guard: the stores below are unconditional, so refuse out-of-image
	// threads if the grid ever over-covers the image.
	if (th_row >= nROW || th_col >= nCOL)
		return;
	newR[(i*newRp)+j]=(uint8_t)0;
	newG[(i*newGp)+j]=(uint8_t)0;
	newB[(i*newBp)+j]=(uint8_t)0;
	if(0<th_row && nROW-1>th_row && 0<th_col && nCOL-1>th_col){
		// BUG FIX: the accumulator was declared int, so every "+=" truncated
		// its floating-point PSF term toward zero before summing, corrupting
		// the convolution. Accumulate in float and clamp once per channel.
		float temp;
		temp = 0.0f;
		temp += (PSF[0] * (float)r[((i-1)*rP)+j-1]);
		temp += (PSF[1] * (float)r[((i-1)*rP)+j]);
		temp += (PSF[2] * (float)r[((i-1)*rP)+j+1]);
		temp += (PSF[3] * (float)r[((i)*rP)+j-1]);
		temp += (PSF[4] * (float)r[((i)*rP)+j]);
		temp += (PSF[5] * (float)r[((i)*rP)+j+1]);
		temp += (PSF[6] * (float)r[((i+1)*rP)+j-1]);
		temp += (PSF[7] * (float)r[((i+1)*rP)+j]);
		temp += (PSF[8] * (float)r[((i+1)*rP)+j+1]);
		if(temp<0.0f) temp=0.0f;
		if(temp>255.0f) temp=255.0f;
		newR[(i*newRp)+j]=(uint8_t)temp;
		temp = 0.0f;
		temp += (PSF[0] * (float)g[((i-1)*gP)+j-1]);
		temp += (PSF[1] * (float)g[((i-1)*gP)+j]);
		temp += (PSF[2] * (float)g[((i-1)*gP)+j+1]);
		temp += (PSF[3] * (float)g[((i)*gP)+j-1]);
		temp += (PSF[4] * (float)g[((i)*gP)+j]);
		temp += (PSF[5] * (float)g[((i)*gP)+j+1]);
		temp += (PSF[6] * (float)g[((i+1)*gP)+j-1]);
		temp += (PSF[7] * (float)g[((i+1)*gP)+j]);
		temp += (PSF[8] * (float)g[((i+1)*gP)+j+1]);
		if(temp<0.0f) temp=0.0f;
		if(temp>255.0f) temp=255.0f;
		newG[(i*newGp)+j]=(uint8_t)temp;
		temp = 0.0f;
		temp += (PSF[0] * (float)b[((i-1)*bP)+j-1]);
		temp += (PSF[1] * (float)b[((i-1)*bP)+j]);
		temp += (PSF[2] * (float)b[((i-1)*bP)+j+1]);
		temp += (PSF[3] * (float)b[((i)*bP)+j-1]);
		temp += (PSF[4] * (float)b[((i)*bP)+j]);
		temp += (PSF[5] * (float)b[((i)*bP)+j+1]);
		temp += (PSF[6] * (float)b[((i+1)*bP)+j-1]);
		temp += (PSF[7] * (float)b[((i+1)*bP)+j]);
		temp += (PSF[8] * (float)b[((i+1)*bP)+j+1]);
		if(temp<0.0f) temp=0.0f;
		if(temp>255.0f) temp=255.0f;
		newB[(i*newBp)+j]=(uint8_t)temp;
	}
	else {
		// Edge pixel: no full 3x3 neighborhood — pass through unchanged.
		newR[(i*newRp)+j]=(uint8_t)r[(i*rP)+j];
		newG[(i*newGp)+j]=(uint8_t)g[(i*gP)+j];
		newB[(i*newBp)+j]=(uint8_t)b[(i*bP)+j];
	}
}
#endif
int main(int argc, char *argv[]){
// Reads a 320x240 binary PPM (fixed 38-byte header assumed), sharpens it on
// the GPU via image_process, and writes "sharpened.ppm". Prints the elapsed
// GPU-path time (H2D copy + kernel + D2H copy) in milliseconds.
// NOTE(review): read()/write() return values are unchecked; the pitch printf
// below passes size_t values to "%d" (should be "%zu"); and
// cudaThreadSynchronize/cudaThreadExit are deprecated in favor of
// cudaDeviceSynchronize/cudaDeviceReset.
int i,j,test;
char infilename[128];
int infd,outfd;
char ppm_header[128];
#ifdef USE_CUDA
cudaError_t cuda_ret;
// 80x60 blocks of 4x4 threads = exactly 320x240 threads, one per pixel.
dim3 mainGrid(80,60); dim3 rowBlock(4,4);
#endif
if(2!=argc){
printf("Usage:: ./filename imagefile.ppm\nExit\n");
return -1;
}
//printf("size of uint8_t is %d\n",sizeof(uint8_t));
sprintf(infilename,"%s",argv[1]);
infd=open(infilename, O_RDONLY,0644);
if(0>infd){
perror("ERROR opening file");
exit(-1);
}
// Output file; NOTE(review): outfd is not checked for open failure.
outfd=open("sharpened.ppm",(O_RDWR | O_CREAT),0666);
// Copy the PPM header verbatim (38 bytes for this fixed 320x240 format).
read(infd, ppm_header,38);
ppm_header[38]='\0';
//printf("HEADER is %s",ppm_header);
// Read the image
// One (R,G,B) byte-triple per pixel into separate channel arrays (SoA).
for(i=0;i<SIZE;i++){
read(infd,(void*)&r[i],1);
read(infd,(void*)&g[i],1);
read(infd,(void*)&b[i],1);
}
close(infd);
#ifdef USE_CUDA
// Allocate Device memory
// Pitched allocations keep each 320-byte row aligned; pitch is in bytes,
// which equals elements here since the channels are uint8_t.
//printf("cuda_ret=%d; success=%d; error=%d\n",cuda_ret,cudaSuccess,cudaErrorMemoryAllocation);
cuda_ret=cudaMallocPitch(&rdev,&rdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==cudaSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==cudaErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=cudaMallocPitch(&gdev,&gdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==cudaSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==cudaErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=cudaMallocPitch(&bdev,&bdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==cudaSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==cudaErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=cudaMallocPitch(&sharpRdev,&sharpRdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==cudaSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==cudaErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=cudaMallocPitch(&sharpGdev,&sharpGdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==cudaSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==cudaErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
cuda_ret=cudaMallocPitch(&sharpBdev,&sharpBdevp,nCOL*sizeof(uint8_t),nROW);
//if(cuda_ret==cudaSuccess) printf("CUDA MEM ALLOC SUCCESS\n");
//else if(cuda_ret==cudaErrorMemoryAllocation) printf("CUDA MEM ALLOC ERROR\n");
printf("Pitch sizes: %d %d %d %d %d %d\n",rdevp,gdevp,bdevp,sharpRdevp,sharpGdevp,sharpBdevp);
// Wall-clock timing starts here (covers copies + kernel).
gettimeofday(&tv_start,NULL);
// Copy from host to device memory
cuda_ret=cudaMemcpy2D((void*)rdev,rdevp,(const void*)r,nCOL*sizeof(uint8_t),nCOL*sizeof(uint8_t),nROW,cudaMemcpyHostToDevice);
//printf("cudaMemcpy2D returns=%d\n");
cuda_ret=cudaMemcpy2D((void*)gdev,gdevp,(const void*)g,nCOL*sizeof(uint8_t),nCOL*sizeof(uint8_t),nROW,cudaMemcpyHostToDevice);
//printf("cudaMemcpy2D returns=%d\n");
cuda_ret=cudaMemcpy2D((void*)bdev,bdevp,(const void*)b,nCOL*sizeof(uint8_t),nCOL*sizeof(uint8_t),nROW,cudaMemcpyHostToDevice);
//printf("cudaMemcpy2D returns=%d\n");
printf("Host to device copy .. done\n");
image_process<<<mainGrid,rowBlock>>>(rdev, gdev, bdev, rdevp, gdevp, bdevp
, sharpRdev, sharpGdev, sharpBdev, sharpRdevp, sharpGdevp, sharpBdevp);
// Wait for the kernel before the D2H copies below.
cudaThreadSynchronize();
// Copy processed RBG data from device to host memory
cudaMemcpy2D((void*)sharpR,nCOL*sizeof(uint8_t),(const void*)sharpRdev,sharpRdevp,nCOL*sizeof(uint8_t),nROW,cudaMemcpyDeviceToHost);
cudaMemcpy2D((void*)sharpG,nCOL*sizeof(uint8_t),(const void*)sharpGdev,sharpGdevp,nCOL*sizeof(uint8_t),nROW,cudaMemcpyDeviceToHost);
cudaMemcpy2D((void*)sharpB,nCOL*sizeof(uint8_t),(const void*)sharpBdev,sharpBdevp,nCOL*sizeof(uint8_t),nROW,cudaMemcpyDeviceToHost);
gettimeofday(&tv_end,NULL);
#endif
/* test=0;
for(i=0;i<nROW;i++){
test+=sharpR[i];
}
printf("SUM of R's is=%d\n",test);
*/
// Write the unchanged header followed by the sharpened interleaved pixels.
write(outfd, (void *)ppm_header, 38);
for(i=0; i<SIZE; i++)
{
write(outfd, (void *)&sharpR[i], 1);
write(outfd, (void *)&sharpG[i], 1);
write(outfd, (void *)&sharpB[i], 1);
}
close(outfd);
#ifdef USE_CUDA
cudaFree(rdev);
cudaFree(gdev);
cudaFree(bdev);
cudaFree(sharpRdev);
cudaFree(sharpGdev);
cudaFree(sharpBdev);
cudaThreadExit();
#endif
printf("Time elapsed= %f ms\n",(1000000*tv_end.tv_sec+tv_end.tv_usec-1000000*tv_start.tv_sec-tv_start.tv_usec)/1000.0);
return 0;
}
|
a04ca0df602507b012210c1419cc19e91534c323.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../knet.h"
template<typename dType>
__global__ void _nceforw(int m, int n, dType *kq, dType *s, dType *p) {
int ij = threadIdx.x + blockIdx.x * blockDim.x;
int mn = m*n;
while(ij < mn) {
int i = ij % m;
dType exps = exp(s[ij]);
p[ij] = exps/(exps+kq[i]);
ij += blockDim.x * gridDim.x;
}
}
extern "C" {
void nceforw32(int m, int n, float *kq, float *s, float *p) KCALL(_nceforw,m,n,kq,s,p);
void nceforw64(int m, int n, double *kq, double *s, double *p) KCALL(_nceforw,m,n,kq,s,p);
}
| a04ca0df602507b012210c1419cc19e91534c323.cu | #include "../knet.h"
template<typename dType>
__global__ void _nceforw(int m, int n, dType *kq, dType *s, dType *p) {
int ij = threadIdx.x + blockIdx.x * blockDim.x;
int mn = m*n;
while(ij < mn) {
int i = ij % m;
dType exps = exp(s[ij]);
p[ij] = exps/(exps+kq[i]);
ij += blockDim.x * gridDim.x;
}
}
extern "C" {
void nceforw32(int m, int n, float *kq, float *s, float *p) KCALL(_nceforw,m,n,kq,s,p);
void nceforw64(int m, int n, double *kq, double *s, double *p) KCALL(_nceforw,m,n,kq,s,p);
}
|
0a8a5a7c3411067aba335620a2001e999583eb63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int tid = threadIdx.x;
d_out[tid] = d_in[tid] * d_in[tid] * d_in[tid];
}
int main(int argc, char ** arhv){
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//generate the input array on the host
float h_in[ARRAY_SIZE];
for( int i = 0; i < ARRAY_SIZE; i++){
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// Allocate GPU Memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
//print out result
for(int i = 0; i < ARRAY_SIZE; i++){
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t": "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
} | 0a8a5a7c3411067aba335620a2001e999583eb63.cu | #include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int tid = threadIdx.x;
d_out[tid] = d_in[tid] * d_in[tid] * d_in[tid];
}
int main(int argc, char ** arhv){
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//generate the input array on the host
float h_in[ARRAY_SIZE];
for( int i = 0; i < ARRAY_SIZE; i++){
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// Allocate GPU Memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//print out result
for(int i = 0; i < ARRAY_SIZE; i++){
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t": "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
} |
19891f8d57b9c99f3325d073e2f7be372269d4a4.hip | // !!! This is a file automatically generated by hipify!!!
//-----------------------------------------------------------------------
// Particle Filters Simulation --- SEQUENTIAL ALGORITHM
//-----------------------------------------------------------------------
// Written by: Javier Pastorino
// Updated in Dec-2016
//-----------------------------------------------------------------------
#include <iostream>
#include <iomanip>
#include <cmath>
#include <time.h>
#include <cstdlib>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <omp.h>
using namespace std;
//-----------------------------------------------------------------------
// Data Structures and Constants
//-----------------------------------------------------------------------
#define THREADS_PER_BLOCK 1024
#define MAX_PART_TO_PRINT 20
struct Particle {
int x,y;
double weight;
bool choosen;
};
struct Robot {
int x,y;
};
Robot theRobot; /*Variable that register the real position of the robot. Used to simulate the probabilities of the particles and compare if the algorithm succeeds.*/
bool showOutput=false;
/********************************************************************/
bool GetUserInput(int argc, char *argv[], int& spaceDimention, long& particleQuantity){
	/* Parses the command-line arguments and, if they are valid, prompts for
	   the robot's true position on stdin. Returns true only when every
	   argument and the entered coordinates are valid.
	   Side effects: fills the globals theRobot and showOutput. */
	bool isOK = true;
	int robotX, robotY;
	if (argc < 4)
	{
		cout << "Arguments:<spaceDimention> <particleQuantity> <showOutput>" << endl;
		cout << "spaceDimention : Space Matrix size [ spaceDimention X spaceDimention]" << endl;
		cout << "particleQuantity : number of particles to create" << endl;
		cout << "showOutput : 0|1 if show iteration output. Summary will always be shown." << endl;
		isOK = false;
	}
	else
	{
		//get spaceDimention
		spaceDimention = atoi(argv[1]);
		if (spaceDimention <= 0)
		{
			cout << "Space size must be larger than 0" << endl;
			isOK = false;
		}
		//get particleQuantity
		particleQuantity = atol(argv[2]);
		if (particleQuantity <= 0)
		{
			cout << "Particles must be more than 0" << endl;
			isOK = false;
		}
		//get showOutput
		int SO = atoi(argv[3]);
		if (SO == 0) showOutput = false;
		else
			if (SO == 1) showOutput = true;
			else {
				cout << "showOutput should be 0 or 1" << endl;
				isOK = false;
			}
		if (isOK){
			/*Read Robot Initial Location*/
			cout<<"Select robot position X (0.."<<spaceDimention-1<<"): "; cin>>robotX;
			cout<<"Select robot position Y (0.."<<spaceDimention-1<<"): "; cin>>robotY;
			// BUG FIX: the prompt advertises the valid range 0..dim-1 but the
			// entered values were never validated; an out-of-range robot made
			// every simulation comparison meaningless.
			if (robotX < 0 || robotX >= spaceDimention ||
					robotY < 0 || robotY >= spaceDimention) {
				cout << "Robot position must be inside 0.." << spaceDimention-1 << endl;
				isOK = false;
			} else {
				theRobot.x = robotX; theRobot.y = robotY;
			}
		}
	}
	return isOK;
}
/********************************************************************/
/* Picks a random heading for the robot's next moves: (dx,dy) becomes either
   (1,0) (move along x) or (0,1) (move along y), each with ~50% probability.
   FIX: the original called srand(time(NULL)) on every invocation, so all calls
   within the same wall-clock second restarted the rand() sequence and returned
   the same heading. Seed the generator only once per process instead. */
void updateRobotMovements(int &dx, int &dy){
    static bool seeded = false;   // one-time seed guard
    if (!seeded){ srand (time(NULL)); seeded = true; }
    if ( (rand()%100 ) > 50) {
        dx = 1;
        dy = 0;
    }
    else {
        dx = 0;
        dy = 1;
    }
}
/**************************** Run in CPU ****************************************/
/* Host-side particle weight: the inverse of the particle's Euclidean distance
   to the robot (global theRobot), capped at 1 when the particle sits exactly
   on the robot's cell. */
double cpuEstimateParticleWeight(Particle aParticle){
    const double deltaX = theRobot.x - aParticle.x;
    const double deltaY = theRobot.y - aParticle.y;
    const double distance = sqrt( deltaX*deltaX + deltaY*deltaY );
    return (distance == 0) ? 1 : 1/distance;
}
/***************************** Run in CPU ***************************************/
/* Computes every particle's raw weight (inverse distance to the robot) and
   normalizes the weights so that they sum to 1.
   BUG FIX: the original accumulated the weights into normWeight in TWO loops,
   doubling the normalization factor, so the normalized weights summed to 0.5
   instead of 1. The duplicate accumulation loop is removed. */
void estimateParticlesWeight(Particle* particleSpace, long particleQuantity){
    double normWeight = 0;
    for (long i = 0; i < particleQuantity; i++){
        particleSpace[i].weight = cpuEstimateParticleWeight(particleSpace[i]);
        normWeight += particleSpace[i].weight;
    }
    /* Guard against division by zero for an empty particle set. */
    if (normWeight > 0){
        for (long i = 0; i < particleQuantity; i++){
            particleSpace[i].weight = (1 / normWeight) * particleSpace[i].weight;
        }
    }
}
/********************************************************************/
/* Scatters the first <particleQuantity> particles uniformly at random over the
   [spaceDimention X spaceDimention] grid, with zero weight and choosen=false.
   FIX: the original reseeded with srand(time(NULL)) here, restarting the rand()
   sequence used moments earlier by updateRobotMovements (same second => same
   seed). The generator is seeded once elsewhere; do not reseed per call.
   Also uses a long loop index to match the long particle count. */
void drawFirstParticleSet(Particle* particleSpace, int spaceDimention, long particleQuantity){
    for ( long i = 0; i < particleQuantity; i++){
        particleSpace[i].x = rand() % spaceDimention;   /* 0 .. spaceDimention-1 */
        particleSpace[i].y = rand() % spaceDimention;   /* 0 .. spaceDimention-1 */
        particleSpace[i].weight = 0;
        particleSpace[i].choosen = false;
    }
}
/********************************************************************/
/* Returns a uniform random offset in [0, 1/particleQuantity), used as the
   starting point of the systematic-resampling comb.
   FIX: the original called srand(time(NULL)) on every invocation, so every
   iteration executed within the same second produced the SAME "random" offset,
   defeating the stochastic resampling. The generator is seeded once elsewhere. */
double calculateRandomProbability(long particleQuantity){
    /* Build a 24-bit fixed-point fraction in [0,1) from two rand() draws. */
    double randomProbability = (double)(((rand() << 15) + rand()) & ((1 << 24) - 1)) / (1 << 24);
    /* Scale into [0, 1/particleQuantity). */
    return randomProbability * ( (double)1 / (double) particleQuantity );
}
/********************************************************************/
/* Prints the board as a grid of per-cell particle counts ("R." marks the robot,
   "--" marks an empty cell). Only runs for small boards with output enabled.
   FIX: count and particlesUnderRobot are long, but the original printed them
   with "%2i" (int conversion) — undefined behavior; use "%ld" forms. */
void printMatrixParticles (Particle* particleSpace, long particleQuantity, int spaceDimention){
    if (spaceDimention <= MAX_PART_TO_PRINT && showOutput){
        long particlesUnderRobot = 0;
        for (int i = 0; i < spaceDimention; i++){
            printf("ROW [%2i] ", i );
            for (int j = 0; j < spaceDimention; j++){
                /* O(N) scan per cell; acceptable for a debug printout. */
                long count = 0;
                for ( long k = 0; k < particleQuantity; k++ ){
                    if ( particleSpace[k].x == i && particleSpace[k].y == j)
                        count++;
                }
                if ( theRobot.x == i && theRobot.y == j){
                    cout << "R.";
                    particlesUnderRobot = count;
                }
                else{
                    if (count > 0) printf("%2ld ", count);
                    else cout << "--";
                }
            }
            cout << endl;
        }
        printf("Particles Under Robot: %2ld ", particlesUnderRobot);
    }
}
/********************************************************************/
/* Dumps one line per particle (position, weight, choosen flag). Silently does
   nothing for large boards or when iteration output is disabled. */
void printParticles (Particle* particleSpace, long particleQuantity, int spaceDimention){
    if (spaceDimention > MAX_PART_TO_PRINT || !showOutput) return;
    for ( long i = 0; i < particleQuantity; i++ ){
        const Particle &p = particleSpace[i];
        cout<<"particle "<< i << " X:"<<p.x << " Y:" << p.y << " Weight:"<< p.weight<<" Choosen:"<<p.choosen<<endl;
    }
}
/********************************************************************/
/* Clears the terminal, echoes the run configuration (board size, particle
   count, robot start position), and blocks until the user presses Enter. */
void displayInitialConfiguration(int spaceDimention, long particleQuantity){
system ("clear"); /* NOTE(review): relies on a POSIX "clear" command — confirm target platform */
cout << "Simulation Configuration:" <<endl << "Space Dimention:" <<spaceDimention<<endl <<"Number of Particles:"<<particleQuantity<<endl;
cout << "Robot initial position (x,y) = ("<<theRobot.x<<","<<theRobot.y<<")"<<endl;
cout <<"---------------------------------------------------"<<endl;
cout <<"Press any key to start...\n";
std::cin.ignore(); /* actually waits for a newline, not literally any key */
}
/********************************************************************/
/* Prints a summary report: for every distinct cell that holds particles, the
   particle count and the accumulated probability (sum of weights) of the robot
   being in that cell.
   FIX: j and particleNumber[j] are long but the original printed them with
   "%3i"/"%7i" (int conversions) — undefined behavior; use "%ld" forms. */
void printParticleProbability(Particle* particleSpace, long particleQuantity , int spaceDimention){
    long summaryQty = 0;                                        // number of distinct cells found
    double *particleProbability = new double[particleQuantity]; // accumulated weight per cell
    long *particleNumber = new long[particleQuantity];          // particle count per cell
    Particle *particleSummary = new Particle[particleQuantity]; // representative cell coordinates
    for (long i = 0; i < particleQuantity; i++){
        particleProbability[i] = 0;
        particleNumber[i] = 0;
        particleSummary[i].x = -1;
        particleSummary[i].y = -1;
    }
    for (long i = 0; i < particleQuantity; i++){
        /* Linear search for this particle's cell in the summary
           (O(N^2) overall — acceptable for a final console report). */
        bool found = false;
        long index = 0;
        for (long j = 0; j < summaryQty && !found; j++){
            if ( particleSpace[i].x == particleSummary[j].x
                 &&
                 particleSpace[i].y == particleSummary[j].y ){
                found = true;
                index = j;
            }
        }
        if (! found){ /* first particle seen in this cell: append it */
            index = summaryQty;
            particleSummary[index].x = particleSpace[i].x;
            particleSummary[index].y = particleSpace[i].y;
            summaryQty++;
        }
        particleProbability[index] += particleSpace[i].weight;
        particleNumber[index] ++;
    }
    system ("clear");
    cout << "Simulation Configuration:" <<endl << "Space Dimention:" <<spaceDimention<<endl <<"Number of Particles:"<<particleQuantity<<endl;
    cout << "Robot Final position (x,y) = ("<<theRobot.x<<","<<theRobot.y<<")"<<endl;
    cout <<"---------------------------------------------------"<<endl;
    cout<<"Particle Summary:\n";
    for (long j = 0; j < summaryQty; j++){
        printf("Position (%3ld) (x,y)=(%5i,%5i) #Particles: %7ld Probability of robot here: %6f%% \n", j,particleSummary[j].x,particleSummary[j].y,particleNumber[j],((double)particleProbability[j]*100) );
    }
    delete[] particleProbability;
    delete[] particleNumber;
    delete[] particleSummary;
}
/******************************** RUNS IN GPU ************************************/
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
/* Normalizes the particle weights so Sum(Wi)==1. One thread per particle;
   thread 0 of each block serially recomputes the (identical) total into
   block-shared memory, then every thread scales its own particle.
   BUG FIX: the original tested the GLOBAL thread id (threadId == 0), so only
   block 0 ever initialized the __shared__ sum; every other block divided by
   uninitialized shared memory. Test the per-block index (threadIdx.x) instead.
   NOTE(review): with multiple blocks there is still no inter-block barrier, so
   a block may start scaling elements while another is still summing — a
   two-kernel (sum, then scale) split would be the fully safe fix; confirm. */
__global__ void normalizeWeights(Particle* particleSpace, long particleQuantity){
    unsigned int threadId = ( blockIdx.x * blockDim.x ) + threadIdx.x;
    __shared__ double normWeight;
    if (threadIdx.x == 0){
        // Serial reduction, redundantly repeated by the first thread of each block.
        normWeight = 0;
        for (long i = 0; i < particleQuantity; i++)
            normWeight += particleSpace[i].weight;
    }
    __syncthreads();   // publish normWeight to the whole block before scaling
    if (threadId < particleQuantity)
        particleSpace[threadId].weight = (1 / normWeight) * particleSpace[threadId].weight;
}
/********************************************************************/
/* Computes prefix[i] = sum of particles[0..i].weight with a two-pass OpenMP
   scan: (1) each thread scans its static chunk locally, (2) a single thread
   scans the per-thread totals, (3) each thread adds its base offset to its
   chunk (same static schedule guarantees identical chunk assignment).
   FIX: the threadPrefix scratch buffer was leaked on every call; it is now
   freed after the parallel region. */
void prefixCalculation(Particle *particles, double *prefix, long N)
{
    double *threadPrefix;
    #pragma omp parallel shared(particles,prefix,threadPrefix,N)
    {
        const int ithread = omp_get_thread_num();
        const int nthreads = omp_get_num_threads();
        #pragma omp single
        {
            /* K+1 slots for K threads: slot 0 is a leading zero; slot k+1 holds
               thread k's chunk total. The last slot is written but never read. */
            threadPrefix = new double[nthreads+1];
            threadPrefix[0] = 0;
        } /* implicit barrier: all threads see the allocated buffer */
        /* Pass 1: local prefix over this thread's static chunk. */
        double sum = 0;
        #pragma omp for schedule(static)
        for (long i = 0; i < N; i++) {
            sum += particles[i].weight;
            prefix[i] = sum;
        }
        /* Publish this thread's chunk total at slot ithread+1. */
        threadPrefix[ithread+1] = sum;
        #pragma omp barrier
        /* Pass 2: serial scan of the per-thread totals; after this,
           threadPrefix[k] is the sum of all chunks before thread k's. */
        #pragma omp single
        {
            for (int k = 1; k < nthreads; k++){
                threadPrefix[k] += threadPrefix[k-1];
            }
        } /* implicit barrier */
        /* Pass 3: lift each chunk by its base offset. */
        #pragma omp for schedule(static)
        for (long i = 0; i < N; i++) {
            prefix[i] += threadPrefix[ithread];
        }
    }
    delete[] threadPrefix;   // FIX: previously leaked on every iteration
}
/**************************** Run in GPU ****************************************/
/* Device-side particle weight: the inverse of the particle's Euclidean
   distance to the robot, capped at 1 when the distance is zero
   (mirrors the host-side cpuEstimateParticleWeight). */
__device__ double estimateParticleWeight(Particle aParticle, Robot theRobot){
/* Estimates the weigh of a particle being in the robots position. */
double distance = sqrt( (double)(pow( (double)(theRobot.x - aParticle.x) ,2) + pow( (double)(theRobot.y - aParticle.y) ,2)) );
if (distance == 0)
return 1;
else
return 1/distance;
}
/******************************** RUNS IN GPU ************************************/
/* Systematic-resampling kernel: thread t computes its comb position
   u_t = randomProbability + t/N, linearly searches the weight prefix for the
   ancestor particle j with cumulativeWeight[j] >= u_t, clones it shifted by
   the robot's move (dx,dy), reflects it off the board edges, and re-weights it.
   NOTE(review): resampling is done IN PLACE — particleSpace[j] is read while
   other threads concurrently overwrite particleSpace[threadId]; beyond a single
   warp this is a read/write race on the particle array. Writing into a separate
   output buffer would be the safe fix — confirm whether the race is tolerated. */
__global__ void applyParticleFilters(Particle* particleSpace, double* cumulativeWeight, int spaceDimention, long particleQuantity, int dx, int dy, Robot theRobot, double randomProbability){
unsigned int threadId = ( blockIdx.x * blockDim.x ) + threadIdx.x;
long j=0;
if (threadId < particleQuantity) //Guard: the grid may overshoot the particle count
{
/* This thread's position on the resampling comb. */
double uk = (double) randomProbability + ( (double) threadId / particleQuantity ) ;
/* Linear search for the first prefix bin covering uk (clamped to the last particle). */
while (uk > cumulativeWeight[j] && j<particleQuantity-1) { j ++; }
//APPLY RANDOM MOVEMENT: clone ancestor j shifted by the robot's motion.
particleSpace[threadId].x = particleSpace[j].x + dx;
particleSpace[threadId].y = particleSpace[j].y + dy;
particleSpace[threadId].choosen = true;
//Boundary control: reflect back inside the board.
if (particleSpace[threadId].x < 0 ||
particleSpace[threadId].x > spaceDimention-1) {
particleSpace[threadId].x = particleSpace[threadId].x + (dx * - 2);
}
if (particleSpace[threadId].y < 0 ||
particleSpace[threadId].y > spaceDimention-1) {
particleSpace[threadId].y = particleSpace[threadId].y + (dy * - 2);
}
particleSpace[threadId].weight = estimateParticleWeight(particleSpace[threadId], theRobot);
}
}
//********************************************************************
// Main Program
//********************************************************************
/* Driver: parses the configuration, scatters and weights the initial particle
   set on the host, then iterates spaceDimention/2 times: move the robot,
   prefix-sum the weights with OpenMP, resample+reweight on the GPU, normalize
   on the GPU, and copy the particles back for the next prefix pass.
   FIXES: cumulativeWeight was never freed (leak); iteration modulo guarded
   against spaceDimention==1 (spaceDimention/2 == 0 => modulo by zero). */
int main(int argc, char *argv[])
{
    int dx,dy;                    // robot movement deltas for the current heading
    int spaceDimention;           // board is spaceDimention x spaceDimention
    long particleQuantity;        // number of particles
    Particle *particleSpace;      // host particle array
    Particle *d_particleSpace;    // device particle array
    double *cumulativeWeight;     // host prefix sums of the weights
    double *d_cumulativeWeight;   // device copy of the prefix sums
    double randomProbability;     // systematic-resampling offset in [0, 1/N)
    float runtime;
    if ( GetUserInput(argc,argv,spaceDimention,particleQuantity) == false ) return 1;
    // Ceil-divide so the tail block covers the remainder of the particles.
    int numOfBlocks = particleQuantity / THREADS_PER_BLOCK + ((particleQuantity%THREADS_PER_BLOCK)?1:0);
    displayInitialConfiguration(spaceDimention,particleQuantity);
    cout <<"RUNNIN CUDA WITH BLOCK:"<<numOfBlocks<<" AND THREADS:"<<THREADS_PER_BLOCK<<endl;
    runtime = clock()/(float)CLOCKS_PER_SEC;
    updateRobotMovements(dx,dy);                        // pick the initial heading
    particleSpace = new Particle[particleQuantity];
    cumulativeWeight = new double[particleQuantity];
    // Device buffers for the particles and the weight prefix.
    hipMalloc((void**)&d_particleSpace, particleQuantity*sizeof(Particle));
    hipMalloc((void**)&d_cumulativeWeight, particleQuantity*sizeof(double));
    drawFirstParticleSet(particleSpace,spaceDimention,particleQuantity);
    estimateParticlesWeight(particleSpace,particleQuantity);
    printMatrixParticles(particleSpace,particleQuantity,spaceDimention);
    printParticles(particleSpace,particleQuantity,spaceDimention);
    // Upload the initial particles; the loop keeps them device-resident.
    hipMemcpy(d_particleSpace, particleSpace, particleQuantity*sizeof(Particle), hipMemcpyHostToDevice);
    int iterationsToDo=0;
    const int headingPeriod = spaceDimention/2;   // iterations between random heading changes
    while (iterationsToDo < (spaceDimention*0.5) ){
        // FIX: guard the modulo — headingPeriod is 0 when spaceDimention == 1.
        if (headingPeriod > 0 && iterationsToDo % headingPeriod == 0 )
            updateRobotMovements(dx,dy);
        if (showOutput){ cout <<"Iteration No."<<iterationsToDo<<" Press any key to continue...\n"; std::cin.ignore(); }
        theRobot.x = theRobot.x + dx; theRobot.y = theRobot.y + dy; /*Robot moves.*/
        // Bounce off the board edges and reverse the heading.
        if (theRobot.x < 0 || theRobot.x > spaceDimention-1) {theRobot.x = theRobot.x + (dx * -2); dx = dx * -1;}
        if (theRobot.y < 0 || theRobot.y > spaceDimention-1) {theRobot.y = theRobot.y + (dy * -2); dy = dy * -1;}
        /* Cumulative weight (prefix sum) on the CPU with OpenMP. */
        prefixCalculation(particleSpace,cumulativeWeight,particleQuantity);
        hipMemcpy(d_cumulativeWeight, cumulativeWeight, particleQuantity*sizeof(double), hipMemcpyHostToDevice);
        randomProbability = calculateRandomProbability(particleQuantity);
        hipLaunchKernelGGL(( applyParticleFilters), dim3(numOfBlocks),dim3(THREADS_PER_BLOCK), 0, 0, d_particleSpace, d_cumulativeWeight, spaceDimention, particleQuantity,dx,dy, theRobot, randomProbability);
        hipDeviceSynchronize();   // resampling must finish before normalization
        hipLaunchKernelGGL(( normalizeWeights), dim3(numOfBlocks),dim3(THREADS_PER_BLOCK), 0, 0, d_particleSpace, particleQuantity);
        hipDeviceSynchronize();
        // Copy particles back so the OpenMP prefix can read the new weights.
        hipMemcpy(particleSpace, d_particleSpace, particleQuantity*sizeof(Particle), hipMemcpyDeviceToHost);
        iterationsToDo++;
    }
    //////// Display information.
    cout<<endl;
    printMatrixParticles(particleSpace,particleQuantity,spaceDimention);
    cout<<endl<<endl;
    printParticleProbability(particleSpace, particleQuantity, spaceDimention );
    cout<<endl<<endl;
    runtime = clock()/(float)CLOCKS_PER_SEC - runtime;
    cout<< "Program runs in " << setiosflags(ios::fixed) << setprecision(2) << runtime << " seconds\n";
    hipFree(d_particleSpace);
    hipFree(d_cumulativeWeight);
    delete[] particleSpace;
    delete[] cumulativeWeight;   // FIX: was leaked in the original
    cout <<"---------------------------------------------------"<<endl;
    cout <<"----- Simulation Ended ----"<<endl;
    cout <<"---------------------------------------------------"<<endl;
    return 0;
} | 19891f8d57b9c99f3325d073e2f7be372269d4a4.cu | //-----------------------------------------------------------------------
// Particle Filters Simulation --- SEQUENTIAL ALGORITHM
//-----------------------------------------------------------------------
// Written by: Javier Pastorino
// Updated in Dec-2016
//-----------------------------------------------------------------------
#include <iostream>
#include <iomanip>
#include <cmath>
#include <time.h>
#include <cstdlib>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <omp.h>
using namespace std;
//-----------------------------------------------------------------------
// Data Structures and Constants
//-----------------------------------------------------------------------
#define THREADS_PER_BLOCK 1024
#define MAX_PART_TO_PRINT 20
struct Particle {
int x,y;
double weight;
bool choosen;
};
struct Robot {
int x,y;
};
Robot theRobot; /*Variable that register the real position of the robot. Used to simulate the probabilities of the particles and compare if the algorithm succeeds.*/
bool showOutput=false;
/********************************************************************/
/* Parses the three command-line arguments (board size, particle count, verbosity flag)
   and then interactively reads the robot's initial (x,y) position from stdin.
   Outputs: spaceDimention, particleQuantity (by reference), plus the file-scope
   globals showOutput and theRobot. Returns true only when every input is valid;
   prints a usage message or a specific diagnostic otherwise. */
bool GetUserInput(int argc, char *argv[],int& spaceDimention, long& particleQuantity){
/*Gets the parameters from input*/
bool isOK = true;
int robotX,robotY;
if(argc < 4)
{
cout << "Arguments:<spaceDimention> <particleQuantity> <showOutput>" << endl;
cout << "spaceDimention : Space Matrix size [ spaceDimention X spaceDimention]" << endl;
cout << "particleQuantity : number of particles to create" << endl;
cout << "showOutput : 0|1 if show iteration output. Summary will always be shown." << endl;
isOK = false;
}
else
{
//get spaceDimention (must be a positive board size)
spaceDimention = atoi(argv[1]);
if (spaceDimention <=0)
{
cout << "Space size must be larger than 0" <<endl;
isOK = false;
}
//get particleQuantity (must be positive)
particleQuantity = atol(argv[2]);
if (particleQuantity <= 0)
{
cout << "Particles must be more than 0" <<endl;
isOK = false;
}
//get showOutput (strictly 0 or 1)
int SO = atoi(argv[3]);
if (SO ==0) showOutput=false;
else
if (SO ==1) showOutput=true;
else {
cout << "showOutput should be 0 or 1" <<endl;
isOK = false;
}
if (isOK){
/*Read Robot Initial Location*/
/* NOTE(review): robotX/robotY are not validated against [0, spaceDimention) —
   out-of-range input places the robot off the board; confirm intended. */
cout<<"Select robot position X (0.."<<spaceDimention-1<<"): "; cin>>robotX;
cout<<"Select robot position Y (0.."<<spaceDimention-1<<"): "; cin>>robotY;
theRobot.x=robotX; theRobot.y=robotY;
}
}
return isOK;
}
/********************************************************************/
/* Picks a random heading for the robot's next moves: (dx,dy) becomes either
   (1,0) (move along x) or (0,1) (move along y), each with ~50% probability.
   FIX: the original called srand(time(NULL)) on every invocation, so all calls
   within the same wall-clock second restarted the rand() sequence and returned
   the same heading. Seed the generator only once per process instead. */
void updateRobotMovements(int &dx, int &dy){
    static bool seeded = false;   // one-time seed guard
    if (!seeded){ srand (time(NULL)); seeded = true; }
    if ( (rand()%100 ) > 50) {
        dx = 1;
        dy = 0;
    }
    else {
        dx = 0;
        dy = 1;
    }
}
/**************************** Run in CPU ****************************************/
/* Host-side particle weight: the inverse of the particle's Euclidean distance
   to the robot (global theRobot), capped at 1 when the particle sits exactly
   on the robot's cell. */
double cpuEstimateParticleWeight(Particle aParticle){
    const double deltaX = theRobot.x - aParticle.x;
    const double deltaY = theRobot.y - aParticle.y;
    const double distance = sqrt( deltaX*deltaX + deltaY*deltaY );
    return (distance == 0) ? 1 : 1/distance;
}
/***************************** Run in CPU ***************************************/
/* Computes every particle's raw weight (inverse distance to the robot) and
   normalizes the weights so that they sum to 1.
   BUG FIX: the original accumulated the weights into normWeight in TWO loops,
   doubling the normalization factor, so the normalized weights summed to 0.5
   instead of 1. The duplicate accumulation loop is removed. */
void estimateParticlesWeight(Particle* particleSpace, long particleQuantity){
    double normWeight = 0;
    for (long i = 0; i < particleQuantity; i++){
        particleSpace[i].weight = cpuEstimateParticleWeight(particleSpace[i]);
        normWeight += particleSpace[i].weight;
    }
    /* Guard against division by zero for an empty particle set. */
    if (normWeight > 0){
        for (long i = 0; i < particleQuantity; i++){
            particleSpace[i].weight = (1 / normWeight) * particleSpace[i].weight;
        }
    }
}
/********************************************************************/
/* Scatters the first <particleQuantity> particles uniformly at random over the
   [spaceDimention X spaceDimention] grid, with zero weight and choosen=false.
   FIX: the original reseeded with srand(time(NULL)) here, restarting the rand()
   sequence used moments earlier by updateRobotMovements (same second => same
   seed). The generator is seeded once elsewhere; do not reseed per call.
   Also uses a long loop index to match the long particle count. */
void drawFirstParticleSet(Particle* particleSpace, int spaceDimention, long particleQuantity){
    for ( long i = 0; i < particleQuantity; i++){
        particleSpace[i].x = rand() % spaceDimention;   /* 0 .. spaceDimention-1 */
        particleSpace[i].y = rand() % spaceDimention;   /* 0 .. spaceDimention-1 */
        particleSpace[i].weight = 0;
        particleSpace[i].choosen = false;
    }
}
/********************************************************************/
/* Returns a uniform random offset in [0, 1/particleQuantity), used as the
   starting point of the systematic-resampling comb.
   FIX: the original called srand(time(NULL)) on every invocation, so every
   iteration executed within the same second produced the SAME "random" offset,
   defeating the stochastic resampling. The generator is seeded once elsewhere. */
double calculateRandomProbability(long particleQuantity){
    /* Build a 24-bit fixed-point fraction in [0,1) from two rand() draws. */
    double randomProbability = (double)(((rand() << 15) + rand()) & ((1 << 24) - 1)) / (1 << 24);
    /* Scale into [0, 1/particleQuantity). */
    return randomProbability * ( (double)1 / (double) particleQuantity );
}
/********************************************************************/
/* Prints the board as a grid of per-cell particle counts ("R." marks the robot,
   "--" marks an empty cell). Only runs for small boards with output enabled.
   FIX: count and particlesUnderRobot are long, but the original printed them
   with "%2i" (int conversion) — undefined behavior; use "%ld" forms. */
void printMatrixParticles (Particle* particleSpace, long particleQuantity, int spaceDimention){
    if (spaceDimention <= MAX_PART_TO_PRINT && showOutput){
        long particlesUnderRobot = 0;
        for (int i = 0; i < spaceDimention; i++){
            printf("ROW [%2i] ", i );
            for (int j = 0; j < spaceDimention; j++){
                /* O(N) scan per cell; acceptable for a debug printout. */
                long count = 0;
                for ( long k = 0; k < particleQuantity; k++ ){
                    if ( particleSpace[k].x == i && particleSpace[k].y == j)
                        count++;
                }
                if ( theRobot.x == i && theRobot.y == j){
                    cout << "R.";
                    particlesUnderRobot = count;
                }
                else{
                    if (count > 0) printf("%2ld ", count);
                    else cout << "--";
                }
            }
            cout << endl;
        }
        printf("Particles Under Robot: %2ld ", particlesUnderRobot);
    }
}
/********************************************************************/
/* Dumps one line per particle (position, weight, choosen flag). Silently does
   nothing for large boards or when iteration output is disabled. */
void printParticles (Particle* particleSpace, long particleQuantity, int spaceDimention){
    if (spaceDimention > MAX_PART_TO_PRINT || !showOutput) return;
    for ( long i = 0; i < particleQuantity; i++ ){
        const Particle &p = particleSpace[i];
        cout<<"particle "<< i << " X:"<<p.x << " Y:" << p.y << " Weight:"<< p.weight<<" Choosen:"<<p.choosen<<endl;
    }
}
/********************************************************************/
/* Clears the terminal, echoes the run configuration (board size, particle
   count, robot start position), and blocks until the user presses Enter. */
void displayInitialConfiguration(int spaceDimention, long particleQuantity){
system ("clear"); /* NOTE(review): relies on a POSIX "clear" command — confirm target platform */
cout << "Simulation Configuration:" <<endl << "Space Dimention:" <<spaceDimention<<endl <<"Number of Particles:"<<particleQuantity<<endl;
cout << "Robot initial position (x,y) = ("<<theRobot.x<<","<<theRobot.y<<")"<<endl;
cout <<"---------------------------------------------------"<<endl;
cout <<"Press any key to start...\n";
std::cin.ignore(); /* actually waits for a newline, not literally any key */
}
/********************************************************************/
/* Prints a summary report: for every distinct cell that holds particles, the
   particle count and the accumulated probability (sum of weights) of the robot
   being in that cell.
   FIX: j and particleNumber[j] are long but the original printed them with
   "%3i"/"%7i" (int conversions) — undefined behavior; use "%ld" forms. */
void printParticleProbability(Particle* particleSpace, long particleQuantity , int spaceDimention){
    long summaryQty = 0;                                        // number of distinct cells found
    double *particleProbability = new double[particleQuantity]; // accumulated weight per cell
    long *particleNumber = new long[particleQuantity];          // particle count per cell
    Particle *particleSummary = new Particle[particleQuantity]; // representative cell coordinates
    for (long i = 0; i < particleQuantity; i++){
        particleProbability[i] = 0;
        particleNumber[i] = 0;
        particleSummary[i].x = -1;
        particleSummary[i].y = -1;
    }
    for (long i = 0; i < particleQuantity; i++){
        /* Linear search for this particle's cell in the summary
           (O(N^2) overall — acceptable for a final console report). */
        bool found = false;
        long index = 0;
        for (long j = 0; j < summaryQty && !found; j++){
            if ( particleSpace[i].x == particleSummary[j].x
                 &&
                 particleSpace[i].y == particleSummary[j].y ){
                found = true;
                index = j;
            }
        }
        if (! found){ /* first particle seen in this cell: append it */
            index = summaryQty;
            particleSummary[index].x = particleSpace[i].x;
            particleSummary[index].y = particleSpace[i].y;
            summaryQty++;
        }
        particleProbability[index] += particleSpace[i].weight;
        particleNumber[index] ++;
    }
    system ("clear");
    cout << "Simulation Configuration:" <<endl << "Space Dimention:" <<spaceDimention<<endl <<"Number of Particles:"<<particleQuantity<<endl;
    cout << "Robot Final position (x,y) = ("<<theRobot.x<<","<<theRobot.y<<")"<<endl;
    cout <<"---------------------------------------------------"<<endl;
    cout<<"Particle Summary:\n";
    for (long j = 0; j < summaryQty; j++){
        printf("Position (%3ld) (x,y)=(%5i,%5i) #Particles: %7ld Probability of robot here: %6f%% \n", j,particleSummary[j].x,particleSummary[j].y,particleNumber[j],((double)particleProbability[j]*100) );
    }
    delete[] particleProbability;
    delete[] particleNumber;
    delete[] particleSummary;
}
/******************************** RUNS IN GPU ************************************/
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
/* Normalizes the particle weights so Sum(Wi)==1. One thread per particle;
   thread 0 of each block serially recomputes the (identical) total into
   block-shared memory, then every thread scales its own particle.
   BUG FIX: the original tested the GLOBAL thread id (threadId == 0), so only
   block 0 ever initialized the __shared__ sum; every other block divided by
   uninitialized shared memory. Test the per-block index (threadIdx.x) instead.
   NOTE(review): with multiple blocks there is still no inter-block barrier, so
   a block may start scaling elements while another is still summing — a
   two-kernel (sum, then scale) split would be the fully safe fix; confirm. */
__global__ void normalizeWeights(Particle* particleSpace, long particleQuantity){
    unsigned int threadId = ( blockIdx.x * blockDim.x ) + threadIdx.x;
    __shared__ double normWeight;
    if (threadIdx.x == 0){
        // Serial reduction, redundantly repeated by the first thread of each block.
        normWeight = 0;
        for (long i = 0; i < particleQuantity; i++)
            normWeight += particleSpace[i].weight;
    }
    __syncthreads();   // publish normWeight to the whole block before scaling
    if (threadId < particleQuantity)
        particleSpace[threadId].weight = (1 / normWeight) * particleSpace[threadId].weight;
}
/********************************************************************/
/* Computes prefix[i] = sum of particles[0..i].weight with a two-pass OpenMP
   scan: (1) each thread scans its static chunk locally, (2) a single thread
   scans the per-thread totals, (3) each thread adds its base offset to its
   chunk (same static schedule guarantees identical chunk assignment).
   FIX: the threadPrefix scratch buffer was leaked on every call; it is now
   freed after the parallel region. */
void prefixCalculation(Particle *particles, double *prefix, long N)
{
    double *threadPrefix;
    #pragma omp parallel shared(particles,prefix,threadPrefix,N)
    {
        const int ithread = omp_get_thread_num();
        const int nthreads = omp_get_num_threads();
        #pragma omp single
        {
            /* K+1 slots for K threads: slot 0 is a leading zero; slot k+1 holds
               thread k's chunk total. The last slot is written but never read. */
            threadPrefix = new double[nthreads+1];
            threadPrefix[0] = 0;
        } /* implicit barrier: all threads see the allocated buffer */
        /* Pass 1: local prefix over this thread's static chunk. */
        double sum = 0;
        #pragma omp for schedule(static)
        for (long i = 0; i < N; i++) {
            sum += particles[i].weight;
            prefix[i] = sum;
        }
        /* Publish this thread's chunk total at slot ithread+1. */
        threadPrefix[ithread+1] = sum;
        #pragma omp barrier
        /* Pass 2: serial scan of the per-thread totals; after this,
           threadPrefix[k] is the sum of all chunks before thread k's. */
        #pragma omp single
        {
            for (int k = 1; k < nthreads; k++){
                threadPrefix[k] += threadPrefix[k-1];
            }
        } /* implicit barrier */
        /* Pass 3: lift each chunk by its base offset. */
        #pragma omp for schedule(static)
        for (long i = 0; i < N; i++) {
            prefix[i] += threadPrefix[ithread];
        }
    }
    delete[] threadPrefix;   // FIX: previously leaked on every iteration
}
/**************************** Run in GPU ****************************************/
/* Device-side particle weight: the inverse of the particle's Euclidean
   distance to the robot, capped at 1 when the distance is zero
   (mirrors the host-side cpuEstimateParticleWeight). */
__device__ double estimateParticleWeight(Particle aParticle, Robot theRobot){
/* Estimates the weigh of a particle being in the robots position. */
double distance = sqrt( (double)(pow( (double)(theRobot.x - aParticle.x) ,2) + pow( (double)(theRobot.y - aParticle.y) ,2)) );
if (distance == 0)
return 1;
else
return 1/distance;
}
/******************************** RUNS IN GPU ************************************/
/* Systematic-resampling kernel: thread t computes its comb position
   u_t = randomProbability + t/N, linearly searches the weight prefix for the
   ancestor particle j with cumulativeWeight[j] >= u_t, clones it shifted by
   the robot's move (dx,dy), reflects it off the board edges, and re-weights it.
   NOTE(review): resampling is done IN PLACE — particleSpace[j] is read while
   other threads concurrently overwrite particleSpace[threadId]; beyond a single
   warp this is a read/write race on the particle array. Writing into a separate
   output buffer would be the safe fix — confirm whether the race is tolerated. */
__global__ void applyParticleFilters(Particle* particleSpace, double* cumulativeWeight, int spaceDimention, long particleQuantity, int dx, int dy, Robot theRobot, double randomProbability){
unsigned int threadId = ( blockIdx.x * blockDim.x ) + threadIdx.x;
long j=0;
if (threadId < particleQuantity) //Guard: the grid may overshoot the particle count
{
/* This thread's position on the resampling comb. */
double uk = (double) randomProbability + ( (double) threadId / particleQuantity ) ;
/* Linear search for the first prefix bin covering uk (clamped to the last particle). */
while (uk > cumulativeWeight[j] && j<particleQuantity-1) { j ++; }
//APPLY RANDOM MOVEMENT: clone ancestor j shifted by the robot's motion.
particleSpace[threadId].x = particleSpace[j].x + dx;
particleSpace[threadId].y = particleSpace[j].y + dy;
particleSpace[threadId].choosen = true;
//Boundary control: reflect back inside the board.
if (particleSpace[threadId].x < 0 ||
particleSpace[threadId].x > spaceDimention-1) {
particleSpace[threadId].x = particleSpace[threadId].x + (dx * - 2);
}
if (particleSpace[threadId].y < 0 ||
particleSpace[threadId].y > spaceDimention-1) {
particleSpace[threadId].y = particleSpace[threadId].y + (dy * - 2);
}
particleSpace[threadId].weight = estimateParticleWeight(particleSpace[threadId], theRobot);
}
}
//********************************************************************
// Main Program
//********************************************************************
/* Driver: parses the configuration, scatters and weights the initial particle
   set on the host, then iterates spaceDimention/2 times: move the robot,
   prefix-sum the weights with OpenMP, resample+reweight on the GPU, normalize
   on the GPU, and copy the particles back for the next prefix pass.
   FIXES: cumulativeWeight was never freed (leak); deprecated
   cudaThreadSynchronize() replaced by cudaDeviceSynchronize(); iteration
   modulo guarded against spaceDimention==1 (spaceDimention/2 == 0). */
int main(int argc, char *argv[])
{
    int dx,dy;                    // robot movement deltas for the current heading
    int spaceDimention;           // board is spaceDimention x spaceDimention
    long particleQuantity;        // number of particles
    Particle *particleSpace;      // host particle array
    Particle *d_particleSpace;    // device particle array
    double *cumulativeWeight;     // host prefix sums of the weights
    double *d_cumulativeWeight;   // device copy of the prefix sums
    double randomProbability;     // systematic-resampling offset in [0, 1/N)
    float runtime;
    if ( GetUserInput(argc,argv,spaceDimention,particleQuantity) == false ) return 1;
    // Ceil-divide so the tail block covers the remainder of the particles.
    int numOfBlocks = particleQuantity / THREADS_PER_BLOCK + ((particleQuantity%THREADS_PER_BLOCK)?1:0);
    displayInitialConfiguration(spaceDimention,particleQuantity);
    cout <<"RUNNIN CUDA WITH BLOCK:"<<numOfBlocks<<" AND THREADS:"<<THREADS_PER_BLOCK<<endl;
    runtime = clock()/(float)CLOCKS_PER_SEC;
    updateRobotMovements(dx,dy);                        // pick the initial heading
    particleSpace = new Particle[particleQuantity];
    cumulativeWeight = new double[particleQuantity];
    // Device buffers for the particles and the weight prefix.
    cudaMalloc((void**)&d_particleSpace, particleQuantity*sizeof(Particle));
    cudaMalloc((void**)&d_cumulativeWeight, particleQuantity*sizeof(double));
    drawFirstParticleSet(particleSpace,spaceDimention,particleQuantity);
    estimateParticlesWeight(particleSpace,particleQuantity);
    printMatrixParticles(particleSpace,particleQuantity,spaceDimention);
    printParticles(particleSpace,particleQuantity,spaceDimention);
    // Upload the initial particles; the loop keeps them device-resident.
    cudaMemcpy(d_particleSpace, particleSpace, particleQuantity*sizeof(Particle), cudaMemcpyHostToDevice);
    int iterationsToDo=0;
    const int headingPeriod = spaceDimention/2;   // iterations between random heading changes
    while (iterationsToDo < (spaceDimention*0.5) ){
        // FIX: guard the modulo — headingPeriod is 0 when spaceDimention == 1.
        if (headingPeriod > 0 && iterationsToDo % headingPeriod == 0 )
            updateRobotMovements(dx,dy);
        if (showOutput){ cout <<"Iteration No."<<iterationsToDo<<" Press any key to continue...\n"; std::cin.ignore(); }
        theRobot.x = theRobot.x + dx; theRobot.y = theRobot.y + dy; /*Robot moves.*/
        // Bounce off the board edges and reverse the heading.
        if (theRobot.x < 0 || theRobot.x > spaceDimention-1) {theRobot.x = theRobot.x + (dx * -2); dx = dx * -1;}
        if (theRobot.y < 0 || theRobot.y > spaceDimention-1) {theRobot.y = theRobot.y + (dy * -2); dy = dy * -1;}
        /* Cumulative weight (prefix sum) on the CPU with OpenMP. */
        prefixCalculation(particleSpace,cumulativeWeight,particleQuantity);
        cudaMemcpy(d_cumulativeWeight, cumulativeWeight, particleQuantity*sizeof(double), cudaMemcpyHostToDevice);
        randomProbability = calculateRandomProbability(particleQuantity);
        applyParticleFilters<<<numOfBlocks,THREADS_PER_BLOCK>>> (d_particleSpace, d_cumulativeWeight, spaceDimention, particleQuantity,dx,dy, theRobot, randomProbability);
        cudaDeviceSynchronize();   // FIX: cudaThreadSynchronize() is deprecated
        normalizeWeights<<<numOfBlocks,THREADS_PER_BLOCK>>> (d_particleSpace, particleQuantity);
        cudaDeviceSynchronize();
        // Copy particles back so the OpenMP prefix can read the new weights.
        cudaMemcpy(particleSpace, d_particleSpace, particleQuantity*sizeof(Particle), cudaMemcpyDeviceToHost);
        iterationsToDo++;
    }
    //////// Display information.
    cout<<endl;
    printMatrixParticles(particleSpace,particleQuantity,spaceDimention);
    cout<<endl<<endl;
    printParticleProbability(particleSpace, particleQuantity, spaceDimention );
    cout<<endl<<endl;
    runtime = clock()/(float)CLOCKS_PER_SEC - runtime;
    cout<< "Program runs in " << setiosflags(ios::fixed) << setprecision(2) << runtime << " seconds\n";
    cudaFree(d_particleSpace);
    cudaFree(d_cumulativeWeight);
    delete[] particleSpace;
    delete[] cumulativeWeight;   // FIX: was leaked in the original
    cout <<"---------------------------------------------------"<<endl;
    cout <<"----- Simulation Ended ----"<<endl;
    cout <<"---------------------------------------------------"<<endl;
    return 0;
} |
70e36cb8cd472c71c4c66ee2e81f201a72849a04.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__ADAGradn.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nrows = 1;
int ncols = 1;
float *mm = NULL;
hipMalloc(&mm, XSIZE*YSIZE);
float *um = NULL;
hipMalloc(&um, XSIZE*YSIZE);
float *ssq = NULL;
hipMalloc(&ssq, XSIZE*YSIZE);
float *momentum = NULL;
hipMalloc(&momentum, XSIZE*YSIZE);
float mu = 1;
float *mask = NULL;
hipMalloc(&mask, XSIZE*YSIZE);
int maskr = 1;
float nw = 1;
float *ve = NULL;
hipMalloc(&ve, XSIZE*YSIZE);
int nve = 1;
float *ts = NULL;
hipMalloc(&ts, XSIZE*YSIZE);
int nts = 1;
float *lr = NULL;
hipMalloc(&lr, XSIZE*YSIZE);
int nlr = 1;
float langevin = 1;
float eps = 1;
int doupdate = 1;
hiprandState_t *rstates = NULL;
hipMalloc(&rstates, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
__ADAGradn), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,ncols,mm,um,ssq,momentum,mu,mask,maskr,nw,ve,nve,ts,nts,lr,nlr,langevin,eps,doupdate,rstates);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
__ADAGradn), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,ncols,mm,um,ssq,momentum,mu,mask,maskr,nw,ve,nve,ts,nts,lr,nlr,langevin,eps,doupdate,rstates);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
__ADAGradn), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,ncols,mm,um,ssq,momentum,mu,mask,maskr,nw,ve,nve,ts,nts,lr,nlr,langevin,eps,doupdate,rstates);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 70e36cb8cd472c71c4c66ee2e81f201a72849a04.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__ADAGradn.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nrows = 1;
int ncols = 1;
float *mm = NULL;
cudaMalloc(&mm, XSIZE*YSIZE);
float *um = NULL;
cudaMalloc(&um, XSIZE*YSIZE);
float *ssq = NULL;
cudaMalloc(&ssq, XSIZE*YSIZE);
float *momentum = NULL;
cudaMalloc(&momentum, XSIZE*YSIZE);
float mu = 1;
float *mask = NULL;
cudaMalloc(&mask, XSIZE*YSIZE);
int maskr = 1;
float nw = 1;
float *ve = NULL;
cudaMalloc(&ve, XSIZE*YSIZE);
int nve = 1;
float *ts = NULL;
cudaMalloc(&ts, XSIZE*YSIZE);
int nts = 1;
float *lr = NULL;
cudaMalloc(&lr, XSIZE*YSIZE);
int nlr = 1;
float langevin = 1;
float eps = 1;
int doupdate = 1;
curandState *rstates = NULL;
cudaMalloc(&rstates, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
__ADAGradn<<<gridBlock,threadBlock>>>(nrows,ncols,mm,um,ssq,momentum,mu,mask,maskr,nw,ve,nve,ts,nts,lr,nlr,langevin,eps,doupdate,rstates);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__ADAGradn<<<gridBlock,threadBlock>>>(nrows,ncols,mm,um,ssq,momentum,mu,mask,maskr,nw,ve,nve,ts,nts,lr,nlr,langevin,eps,doupdate,rstates);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__ADAGradn<<<gridBlock,threadBlock>>>(nrows,ncols,mm,um,ssq,momentum,mu,mask,maskr,nw,ve,nve,ts,nts,lr,nlr,langevin,eps,doupdate,rstates);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
eb935458cfb75dfbb4cc97351df23da2934ceec2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* file : parallel_search.cu
* author : Tiane Zhu
* date : Mar 23, 2017
*
* this program is an implementation of the parallel search algorithm
* ALGORITHM 4.1 in
* "An Introduction to Parallel Algorithms" - by Joseph Jaja
* p146 - ISBN 9-789201-548563
*/
#include "parallel_search.h"
///////////////////////////////////////////////////////////
// Input to the algorithm //
// X -- strictly ordered array
// y (target) -- target
// p (num_threads) -- num_processor
// j (tid) -- processor idx
///////////////////////////////////////////////////////////
// Output
// i (ret) -- X[i] <= y < x[i+1]
// [ i is initialized to -1 , since it has only non-neg values
// i non-neg => i set ]
///////////////////////////////////////////////////////////
/* kernel strictly following algorithm */
// additional inputs
int * c;
// c -- c array from 0 to p+1
int * q;
// q -- q array from 0 to p+1
int * l;
// l must be allocated to num_blocks size
int * r;
// r must be allocated to num_blocks size
volatile int * dev_ret;
// dev_ret must be allocated to num_blocks size
// n is the number of elements
__device__ void search(number * X, int n, number target, int * c, int * q, int num_threads, volatile int * dev_ret, int * l, int * r){
int tid = threadIdx.x;
X += n * blockIdx.x;
l += blockIdx.x;
r += blockIdx.x;
dev_ret += blockIdx.x;
c += blockIdx.x * (blockDim.x + 2);
q += blockIdx.x * (blockDim.x + 2);
tid += 1; // so that idx starts from 1
if(tid > n) return; // safety
//1.
// initialize this part outside kernel
if(tid == 1){
*l = 0;
*r = n + 1;
c[0] = 0;
c[num_threads + 1] = 1;
*dev_ret = -2; // for thread termination purpose
}
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : %d %d\n", blockIdx.x, *l, *r);
#endif
//sync
__syncthreads();
//2.
int count = 0;
while(*r - *l > num_threads){
#ifdef PRETTY_PRINT
if(tid == 1)
printf("iter %d, block %d : %d %d\n", count, blockIdx.x, *l, *r);
#endif
if(tid == 1){
q[0] = *l;
q[num_threads + 1] = *r;
}
q[tid] = *l + tid * ((*r - *l) / (num_threads + 1));
//sync -- use r, l, p;
// -- set q
__syncthreads();
if(target == X[q[tid]]){
*dev_ret = q[tid] - 1; // so that ret idx starts from 0
// can i return here???
// no
//return;
}
else{
if(target > X[q[tid]])
c[tid] = 0;
else
c[tid] = 1;
}
//sync -- use X, q, target
// -- set l, r, c
__syncthreads();
// if ret has been set, return, a replacement for the "return" in the conditional statement;
if(*dev_ret >= -1){
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret0 %d\n", blockIdx.x, *dev_ret);
#endif
return;
}
if(c[tid] < c[tid + 1]){
*l = q[tid];
*r = q[tid + 1];
if(blockIdx.x == 0)
printf("tid %d setting l, r\n", tid);
}
if(tid == 1 && c[0] < c[1]){
*l = q[0];
*r = q[1];
if(blockIdx.x == 0)
printf("tid 1 setting l, r\n");
}
//sync -- use q, c, tid
// -- set l, r
__syncthreads();
count++;
}
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret1 %d\n", blockIdx.x, *dev_ret);
#endif
if(tid > *r - *l) return;
if(target == X[*l+tid]){
*dev_ret = *l + tid - 1; // so that ret idx starts from 0
}
else if(target > X[*l+tid]){
c[tid] = 0;
}
else{
c[tid] = 1;
}
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret2 %d\n", blockIdx.x, *dev_ret);
#endif
if(*dev_ret >= -1)
return;
if(c[tid-1] < c[tid])
*dev_ret = *l + tid - 1 - 1; // so that ret idx starts from 0
if(tid == *r - *l && c[tid] == 0)
*dev_ret = *r - 1 - 1; // so that ret idx starts from 0
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret3 %d\n", blockIdx.x, *dev_ret);
#endif
}
__device__ void fix(volatile int * dev_ret, int dev_ret_len, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(dev_ret[tid] == -2) ;
if(tid < dev_ret_len){
int idx = dev_ret[tid];
if(idx != n-1 && idx != -1){
dev_ret[0] = idx + n * tid;
}
}
}
__global__ void search_main(number * X, int n, number target, int * c, int * q, int num_threads, volatile int * dev_ret, int * l, int * r, int dev_ret_len)
{
// doesn't work for non-pow 2
int tmp_n = n / dev_ret_len;
/*
if(threadIdx.x == 0){
printf("array length : %d\n", n);
printf("tmp_n : %d\n dev_ret_len = %d\n", tmp_n, dev_ret_len);
}
*/
num_threads = num_threads > 1024 ? 1024 : num_threads;
search(X, tmp_n, target, c, q, num_threads, dev_ret, l, r);
if(blockIdx.x == 0 && threadIdx.x == 0){
printf("[ ");
for(int i=0; i<dev_ret_len; i++){
printf("%d ", dev_ret[i]);
}
printf("]\n");
}
fix(dev_ret, dev_ret_len, tmp_n);
__threadfence();
if(threadIdx.x + blockIdx.x * blockDim.x == 0)
printf("gpu found : %d\n", *dev_ret);
}
// main
int main(int argc, char * argv[])
{
setbuf(stdout, NULL);
_init(argc, argv);
if(verbose)
printf("finding target : %d in array of length %d\n", target, X_len);
hipError_t err_code[10];
float gputime, cputime;
int ret_idx, * dev_ret;
hipSetDevice(0);
hipDeviceReset();
unsigned int num_blocks = (1023 + num_threads) / 1024;
unsigned int threads_per_block = num_threads > 1024 ? 1024 : num_threads;
c_size = (2 * num_blocks + num_threads) * sizeof(int);
q_size = (2 * num_blocks + num_threads) * sizeof(int);
// X_len + 2 for the algorithm element at idx 0 and n + 1 (originally 1, 2, ..., n)
err_code[0] = hipMalloc( &dev_X , X_size );
err_code[1] = hipMalloc( &c , c_size );
err_code[2] = hipMalloc( &q , q_size );
err_code[3] = hipMalloc( &dev_ret , sizeof(volatile int) * num_blocks);
err_code[4] = hipMalloc( &l , sizeof(int) * num_blocks );
err_code[5] = hipMalloc( &r , sizeof(int) * num_blocks );
for(int i=0; i<6; i++){ gerror(err_code[i]); }
gerror(hipMemcpy(dev_X, host_X, X_size, hipMemcpyHostToDevice));
ret_idx = 10086;
printf("launching %u blocks, %u threads per block.\n", num_blocks, threads_per_block);
d->Dg = {num_blocks, 1, 1};
d->Db = {threads_per_block, 1, 1};
gstart();
hipLaunchKernelGGL(( search_main), dim3(d->Dg), dim3(d->Db), 0, 0, dev_X, X_len, target, c, q, num_threads, dev_ret, l, r, num_blocks);
gend(&gputime);
printf("gputime : %f ms\n", gputime);
gerror(hipGetLastError());
gerror( hipDeviceSynchronize() );
gerror(hipMemcpy(&ret_idx, dev_ret, sizeof(int), hipMemcpyDeviceToHost));
printf("device idx = %d;\n", ret_idx);
ret_idx = 10086;
cstart();
ret_idx = cpu_search(host_X + 1, X_len, target);
cend(&cputime);
printf("cputime : %f ms\n", cputime);
printf("host idx = %d;\n", ret_idx);
gerror(hipFree(dev_X));
gerror(hipFree(c));
gerror(hipFree(q));
gerror(hipFree(dev_ret));
gerror(hipFree(l));
gerror(hipFree(r));
free(host_X);
}
char fname[80];
void _init(int argc, char ** argv)
{
X_len = DEFAULT_ARRAY_LEN;
num_threads = DEFAULT_NUM_THREADS;
target = DEFAULT_TARGET;
fname[0] = 0;
int len_spec = 0;
for(int i=1; i<argc; i++){
switch(*argv[i]){
case '-':
switch(argv[i][1]){
case 'v':
verbose = 1;
break;
case 'f':
if(!len_spec){
strcpy(fname, argv[++i]);
len_spec = 1;
}
break;
case 't':
sscanf(argv[++i], "%d", &num_threads);
break;
case 'l':
if(!len_spec){
sscanf(argv[++i], "%d", &X_len);
len_spec = 1;
}
break;
}
break;
default:
sscanf(argv[i], FMT, &target);
}
}
X_size = (X_len + 2) * sizeof(number);
_init_array(fname[0] != 0);
prep_kernel();
}
void _init_array(int with_file)
{
host_X = (number *) malloc(X_size);
host_X[0] = INT_MIN;
host_X[X_len+1] = INT_MAX;
//not use file
if(!with_file){
for(number i=1; i<X_len+1; i++){
host_X[i] = 2 * i;
}
return;
}
//use file
FILE * fp;
printf("array file : \"%s\"", fname);
if(!(fp = fopen(fname, "r"))){
printf(" does not exist.\n");
exit(1);
}
if(fscanf(fp, "%d", &X_len) < 1){
printf(" stats broken.\n");
exit(1);
}
printf("\n");
for(int i=0; i<X_len; i++){
if(fscanf(fp, FMT, host_X + i) != 1){
printf(" missing the %dth number.\n", i);
exit(1);
}
if(verbose)
printf(FMT, host_X[i]);
}
if(verbose) printf("\n");
}
| eb935458cfb75dfbb4cc97351df23da2934ceec2.cu | /* file : parallel_search.cu
* author : Tiane Zhu
* date : Mar 23, 2017
*
* this program is an implementation of the parallel search algorithm
* ALGORITHM 4.1 in
* "An Introduction to Parallel Algorithms" - by Joseph Jaja
* p146 - ISBN 9-789201-548563
*/
#include "parallel_search.h"
///////////////////////////////////////////////////////////
// Input to the algorithm //
// X -- strictly ordered array
// y (target) -- target
// p (num_threads) -- num_processor
// j (tid) -- processor idx
///////////////////////////////////////////////////////////
// Output
// i (ret) -- X[i] <= y < x[i+1]
// [ i is initialized to -1 , since it has only non-neg values
// i non-neg => i set ]
///////////////////////////////////////////////////////////
/* kernel strictly following algorithm */
// additional inputs
int * c;
// c -- c array from 0 to p+1
int * q;
// q -- q array from 0 to p+1
int * l;
// l must be allocated to num_blocks size
int * r;
// r must be allocated to num_blocks size
volatile int * dev_ret;
// dev_ret must be allocated to num_blocks size
// n is the number of elements
__device__ void search(number * X, int n, number target, int * c, int * q, int num_threads, volatile int * dev_ret, int * l, int * r){
int tid = threadIdx.x;
X += n * blockIdx.x;
l += blockIdx.x;
r += blockIdx.x;
dev_ret += blockIdx.x;
c += blockIdx.x * (blockDim.x + 2);
q += blockIdx.x * (blockDim.x + 2);
tid += 1; // so that idx starts from 1
if(tid > n) return; // safety
//1.
// initialize this part outside kernel
if(tid == 1){
*l = 0;
*r = n + 1;
c[0] = 0;
c[num_threads + 1] = 1;
*dev_ret = -2; // for thread termination purpose
}
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : %d %d\n", blockIdx.x, *l, *r);
#endif
//sync
__syncthreads();
//2.
int count = 0;
while(*r - *l > num_threads){
#ifdef PRETTY_PRINT
if(tid == 1)
printf("iter %d, block %d : %d %d\n", count, blockIdx.x, *l, *r);
#endif
if(tid == 1){
q[0] = *l;
q[num_threads + 1] = *r;
}
q[tid] = *l + tid * ((*r - *l) / (num_threads + 1));
//sync -- use r, l, p;
// -- set q
__syncthreads();
if(target == X[q[tid]]){
*dev_ret = q[tid] - 1; // so that ret idx starts from 0
// can i return here???
// no
//return;
}
else{
if(target > X[q[tid]])
c[tid] = 0;
else
c[tid] = 1;
}
//sync -- use X, q, target
// -- set l, r, c
__syncthreads();
// if ret has been set, return, a replacement for the "return" in the conditional statement;
if(*dev_ret >= -1){
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret0 %d\n", blockIdx.x, *dev_ret);
#endif
return;
}
if(c[tid] < c[tid + 1]){
*l = q[tid];
*r = q[tid + 1];
if(blockIdx.x == 0)
printf("tid %d setting l, r\n", tid);
}
if(tid == 1 && c[0] < c[1]){
*l = q[0];
*r = q[1];
if(blockIdx.x == 0)
printf("tid 1 setting l, r\n");
}
//sync -- use q, c, tid
// -- set l, r
__syncthreads();
count++;
}
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret1 %d\n", blockIdx.x, *dev_ret);
#endif
if(tid > *r - *l) return;
if(target == X[*l+tid]){
*dev_ret = *l + tid - 1; // so that ret idx starts from 0
}
else if(target > X[*l+tid]){
c[tid] = 0;
}
else{
c[tid] = 1;
}
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret2 %d\n", blockIdx.x, *dev_ret);
#endif
if(*dev_ret >= -1)
return;
if(c[tid-1] < c[tid])
*dev_ret = *l + tid - 1 - 1; // so that ret idx starts from 0
if(tid == *r - *l && c[tid] == 0)
*dev_ret = *r - 1 - 1; // so that ret idx starts from 0
#ifdef PRETTY_PRINT
if(tid == 1)
printf("%d : dev_ret3 %d\n", blockIdx.x, *dev_ret);
#endif
}
__device__ void fix(volatile int * dev_ret, int dev_ret_len, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(dev_ret[tid] == -2) ;
if(tid < dev_ret_len){
int idx = dev_ret[tid];
if(idx != n-1 && idx != -1){
dev_ret[0] = idx + n * tid;
}
}
}
__global__ void search_main(number * X, int n, number target, int * c, int * q, int num_threads, volatile int * dev_ret, int * l, int * r, int dev_ret_len)
{
// doesn't work for non-pow 2
int tmp_n = n / dev_ret_len;
/*
if(threadIdx.x == 0){
printf("array length : %d\n", n);
printf("tmp_n : %d\n dev_ret_len = %d\n", tmp_n, dev_ret_len);
}
*/
num_threads = num_threads > 1024 ? 1024 : num_threads;
search(X, tmp_n, target, c, q, num_threads, dev_ret, l, r);
if(blockIdx.x == 0 && threadIdx.x == 0){
printf("[ ");
for(int i=0; i<dev_ret_len; i++){
printf("%d ", dev_ret[i]);
}
printf("]\n");
}
fix(dev_ret, dev_ret_len, tmp_n);
__threadfence();
if(threadIdx.x + blockIdx.x * blockDim.x == 0)
printf("gpu found : %d\n", *dev_ret);
}
// main
int main(int argc, char * argv[])
{
setbuf(stdout, NULL);
_init(argc, argv);
if(verbose)
printf("finding target : %d in array of length %d\n", target, X_len);
cudaError_t err_code[10];
float gputime, cputime;
int ret_idx, * dev_ret;
cudaSetDevice(0);
cudaDeviceReset();
unsigned int num_blocks = (1023 + num_threads) / 1024;
unsigned int threads_per_block = num_threads > 1024 ? 1024 : num_threads;
c_size = (2 * num_blocks + num_threads) * sizeof(int);
q_size = (2 * num_blocks + num_threads) * sizeof(int);
// X_len + 2 for the algorithm element at idx 0 and n + 1 (originally 1, 2, ..., n)
err_code[0] = cudaMalloc( &dev_X , X_size );
err_code[1] = cudaMalloc( &c , c_size );
err_code[2] = cudaMalloc( &q , q_size );
err_code[3] = cudaMalloc( &dev_ret , sizeof(volatile int) * num_blocks);
err_code[4] = cudaMalloc( &l , sizeof(int) * num_blocks );
err_code[5] = cudaMalloc( &r , sizeof(int) * num_blocks );
for(int i=0; i<6; i++){ gerror(err_code[i]); }
gerror(cudaMemcpy(dev_X, host_X, X_size, cudaMemcpyHostToDevice));
ret_idx = 10086;
printf("launching %u blocks, %u threads per block.\n", num_blocks, threads_per_block);
d->Dg = {num_blocks, 1, 1};
d->Db = {threads_per_block, 1, 1};
gstart();
search_main<<<d->Dg, d->Db>>>(dev_X, X_len, target, c, q, num_threads, dev_ret, l, r, num_blocks);
gend(&gputime);
printf("gputime : %f ms\n", gputime);
gerror(cudaGetLastError());
gerror( cudaDeviceSynchronize() );
gerror(cudaMemcpy(&ret_idx, dev_ret, sizeof(int), cudaMemcpyDeviceToHost));
printf("device idx = %d;\n", ret_idx);
ret_idx = 10086;
cstart();
ret_idx = cpu_search(host_X + 1, X_len, target);
cend(&cputime);
printf("cputime : %f ms\n", cputime);
printf("host idx = %d;\n", ret_idx);
gerror(cudaFree(dev_X));
gerror(cudaFree(c));
gerror(cudaFree(q));
gerror(cudaFree(dev_ret));
gerror(cudaFree(l));
gerror(cudaFree(r));
free(host_X);
}
char fname[80];
void _init(int argc, char ** argv)
{
X_len = DEFAULT_ARRAY_LEN;
num_threads = DEFAULT_NUM_THREADS;
target = DEFAULT_TARGET;
fname[0] = 0;
int len_spec = 0;
for(int i=1; i<argc; i++){
switch(*argv[i]){
case '-':
switch(argv[i][1]){
case 'v':
verbose = 1;
break;
case 'f':
if(!len_spec){
strcpy(fname, argv[++i]);
len_spec = 1;
}
break;
case 't':
sscanf(argv[++i], "%d", &num_threads);
break;
case 'l':
if(!len_spec){
sscanf(argv[++i], "%d", &X_len);
len_spec = 1;
}
break;
}
break;
default:
sscanf(argv[i], FMT, &target);
}
}
X_size = (X_len + 2) * sizeof(number);
_init_array(fname[0] != 0);
prep_kernel();
}
void _init_array(int with_file)
{
host_X = (number *) malloc(X_size);
host_X[0] = INT_MIN;
host_X[X_len+1] = INT_MAX;
//not use file
if(!with_file){
for(number i=1; i<X_len+1; i++){
host_X[i] = 2 * i;
}
return;
}
//use file
FILE * fp;
printf("array file : \"%s\"", fname);
if(!(fp = fopen(fname, "r"))){
printf(" does not exist.\n");
exit(1);
}
if(fscanf(fp, "%d", &X_len) < 1){
printf(" stats broken.\n");
exit(1);
}
printf("\n");
for(int i=0; i<X_len; i++){
if(fscanf(fp, FMT, host_X + i) != 1){
printf(" missing the %dth number.\n", i);
exit(1);
}
if(verbose)
printf(FMT, host_X[i]);
}
if(verbose) printf("\n");
}
|
29bb22d4e4e7da24e6d2efd8e0e6445acba7e703.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
const int manualBlockSize = 32;
__global__ void square(int *array, int len)
{
int gtid = blockDim.x*blockIdx.x + threadIdx.x;
if (gtid < len)
array[gtid] *= array[gtid];
}
// active warps / maximum warps per SM
static double reportPotentialOccupancy(void *kernel, int blockSize, size_t dynamicSMem)
{
int device;
hipDeviceProp_t prop;
int numBlocks;
int activeWarps;
int maxWarps;
double occupancy;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, kernel, blockSize, dynamicSMem);
activeWarps = numBlocks*blockSize / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
occupancy = (double)activeWarps / maxWarps;
return occupancy;
}
static int launchConfig(int *array, int arrayCount, bool automatic)
{
int blockSize;
int minGridSize;
int gridSize;
size_t dynamicSMemUsage = 0;
hipEvent_t start;
hipEvent_t end;
float elapsedTime;
double potentialOccupancy;
hipEventCreate(&start);
hipEventCreate(&end);
if (automatic)
{
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void *)square,
dynamicSMemUsage, arrayCount);
std::cout << "Suggested block size:"<< blockSize << std::endl;
}
else
{
blockSize = manualBlockSize;
}
gridSize = (arrayCount + blockSize - 1)/blockSize;
hipEventRecord(start);
hipLaunchKernelGGL(( square), dim3(gridSize), dim3(blockSize), dynamicSMemUsage, 0, array, arrayCount);
hipEventRecord(end);
hipDeviceSynchronize();
potentialOccupancy = reportPotentialOccupancy((void *)square, blockSize, dynamicSMemUsage);
std::cout << "potential occupancy:"<<potentialOccupancy * 100 << "%" <<std::endl;
hipEventElapsedTime(&elapsedTime, start, end);
std::cout<< "Elapsed time:" <<elapsedTime << "ms" << std::endl;
return 0;
}
static int test(bool automaticLaunchConfig, const int count = 1000000)
{
int *array;
int *dArray;
int size = count * sizeof(int);
array = new int[count];
for (int i=0;i<count;i++)
{
array[i] = i;
}
hipMalloc(&dArray,size);
hipMemcpy(dArray, array, size, hipMemcpyHostToDevice);
for (int i=0; i< count; i++)
{
array[i] = 0;
}
launchConfig(dArray,count,automaticLaunchConfig);
hipMemcpy(array, dArray, size, hipMemcpyDeviceToHost);
hipFree(dArray);
for (int i=0;i<count;i++)
{
if (array[i] != i*i)
{
std::cout << "element" << i <<" expected "<< i*i <<" actual "<<array[i]<<std::endl;
return 1;
}
}
hipDeviceReset();
delete[] array;
return 0;
}
int main()
{
int status;
std::cout << "[ Manual configuration with "<<manualBlockSize
<< " threads per block ]" << std::endl;
test(false);
test(true);
return 0;
}
| 29bb22d4e4e7da24e6d2efd8e0e6445acba7e703.cu | #include <iostream>
const int manualBlockSize = 32;
__global__ void square(int *array, int len)
{
int gtid = blockDim.x*blockIdx.x + threadIdx.x;
if (gtid < len)
array[gtid] *= array[gtid];
}
// active warps / maximum warps per SM
static double reportPotentialOccupancy(void *kernel, int blockSize, size_t dynamicSMem)
{
int device;
cudaDeviceProp prop;
int numBlocks;
int activeWarps;
int maxWarps;
double occupancy;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, kernel, blockSize, dynamicSMem);
activeWarps = numBlocks*blockSize / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
occupancy = (double)activeWarps / maxWarps;
return occupancy;
}
static int launchConfig(int *array, int arrayCount, bool automatic)
{
int blockSize;
int minGridSize;
int gridSize;
size_t dynamicSMemUsage = 0;
cudaEvent_t start;
cudaEvent_t end;
float elapsedTime;
double potentialOccupancy;
cudaEventCreate(&start);
cudaEventCreate(&end);
if (automatic)
{
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void *)square,
dynamicSMemUsage, arrayCount);
std::cout << "Suggested block size:"<< blockSize << std::endl;
}
else
{
blockSize = manualBlockSize;
}
gridSize = (arrayCount + blockSize - 1)/blockSize;
cudaEventRecord(start);
square<<<gridSize, blockSize, dynamicSMemUsage>>>(array, arrayCount);
cudaEventRecord(end);
cudaDeviceSynchronize();
potentialOccupancy = reportPotentialOccupancy((void *)square, blockSize, dynamicSMemUsage);
std::cout << "potential occupancy:"<<potentialOccupancy * 100 << "%" <<std::endl;
cudaEventElapsedTime(&elapsedTime, start, end);
std::cout<< "Elapsed time:" <<elapsedTime << "ms" << std::endl;
return 0;
}
static int test(bool automaticLaunchConfig, const int count = 1000000)
{
int *array;
int *dArray;
int size = count * sizeof(int);
array = new int[count];
for (int i=0;i<count;i++)
{
array[i] = i;
}
cudaMalloc(&dArray,size);
cudaMemcpy(dArray, array, size, cudaMemcpyHostToDevice);
for (int i=0; i< count; i++)
{
array[i] = 0;
}
launchConfig(dArray,count,automaticLaunchConfig);
cudaMemcpy(array, dArray, size, cudaMemcpyDeviceToHost);
cudaFree(dArray);
for (int i=0;i<count;i++)
{
if (array[i] != i*i)
{
std::cout << "element" << i <<" expected "<< i*i <<" actual "<<array[i]<<std::endl;
return 1;
}
}
cudaDeviceReset();
delete[] array;
return 0;
}
int main()
{
int status;
std::cout << "[ Manual configuration with "<<manualBlockSize
<< " threads per block ]" << std::endl;
test(false);
test(true);
return 0;
}
|
162a5988303ac58d88a1576eea8695a091c3054b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "RayTracer_Vec3f.hpp"
typedef float fp_t;
// typedef float3 Vec3f_Device;
template <class T>
inline __device__ T max(T x, T y) {
return x > y ? x : y;
}
template <class T>
inline __device__ T min(T x, T y) {
return x < y ? x : y;
}
// Data Structure
class Vec3f_Device {
// Data
public:
fp_t n[3];
// Operation
public:
// init
__device__ Vec3f_Device() {
n[0] = 0.0;
n[1] = 0.0;
n[2] = 0.0;
}
__device__ Vec3f_Device(fp_t n0) {
n[0] = n0;
n[1] = n0;
n[2] = n0;
}
__device__ Vec3f_Device(fp_t n0, fp_t n1, fp_t n2) {
n[0] = n0;
n[1] = n1;
n[2] = n2;
}
__device__ Vec3f_Device(const Vec3f_Device &v) {
n[0] = v.n[0];
n[1] = v.n[1];
n[2] = v.n[2];
}
// operator
__device__ fp_t operator [] ( int i ) const { return n[i]; }
__device__ fp_t& operator [] ( int i ) { return n[i]; }
__device__ const Vec3f_Device& operator + () const { return *this; }
__device__ Vec3f_Device operator - () const { return Vec3f_Device(-n[0], -n[1], -n[2]); }
__device__ Vec3f_Device& operator = ( const Vec3f_Device &v ) { n[0] = v.n[0]; n[1] = v.n[1]; n[2] = v.n[2]; return *this; }
__device__ Vec3f_Device& operator += ( const Vec3f_Device &v ) { n[0] += v.n[0]; n[1] += v.n[1]; n[2] += v.n[2]; return *this; }
__device__ Vec3f_Device& operator -= ( const Vec3f_Device &v ) { n[0] -= v.n[0]; n[1] -= v.n[1]; n[2] -= v.n[2]; return *this; }
__device__ Vec3f_Device& operator *= ( const Vec3f_Device &v ) { n[0] *= v.n[0]; n[1] *= v.n[1]; n[2] *= v.n[2]; return *this; }
__device__ Vec3f_Device& operator /= ( const Vec3f_Device &v ) { n[0] /= v.n[0]; n[1] /= v.n[1]; n[2] /= v.n[2]; return *this; }
__device__ Vec3f_Device& operator *= ( const fp_t d ) { n[0] *= d; n[1] *= d; n[2] *= d; return *this; }
__device__ Vec3f_Device& operator /= ( const fp_t d ) { n[0] /= d; n[1] /= d; n[2] /= d; return *this; }
// Math Operation
// TODO: clamp (0.0 <= n[i] <= 1.0)
Vec3f_Device __device__ clamp(fp_t val_min, fp_t val_max) const {
return Vec3f_Device(
max(val_min, min(n[0], val_max)),
max(val_min, min(n[1], val_max)),
max(val_min, min(n[2], val_max)));
}
bool __device__ isZero() const {
return n[0] == 0 && n[1] == 0 && n[2] == 0;
}
Vec3f_Device __device__ cross(const Vec3f_Device &v) const {
return Vec3f_Device(
n[1] * v[2] - n[2] * v[1],
n[2] * v[0] - n[0] * v[2],
n[0] * v[1] - n[1] * v[0] );
}
fp_t __device__ dot(const Vec3f_Device &v) const {
return n[0] * v[0] + n[1] * v[1] + n[2] * v[2];
}
Vec3f_Device __device__ prod(const Vec3f_Device &v) const {
return Vec3f_Device(n[0] * v[0], n[1] * v[1], n[2] * v[2]);
}
// reference
// 1. https://en.wikipedia.org/wiki/Vector_projection
fp_t __device__ projectLength(const Vec3f_Device &v) const {
fp_t dot_result = dot(v);
fp_t length_v2 = v.lengthSquared();
return dot_result / length_v2;
}
// reference
// 1. https://en.wikipedia.org/wiki/Vector_projection
// projection of a on b = a.dot(b) / b.lengthSquared() * b
Vec3f_Device __device__ projectOn(const Vec3f_Device &v) const {
fp_t length = projectLength(v);
return Vec3f_Device(length * v[0], length * v[1], length * v[2]);
}
Vec3f_Device __device__ normalize() const {
Vec3f_Device ret (*this);
ret /= length();
return ret;
}
fp_t __device__ length() const {
return sqrt(lengthSquared());
}
fp_t __device__ lengthSquared() const {
return n[0] * n[0] + n[1] * n[1] + n[2] * n[2];
}
};
inline __device__ Vec3f_Device operator+(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2]);
}
inline __device__ Vec3f_Device operator-(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2]);
}
inline __device__ Vec3f_Device operator*(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] * v2[0], v1[1] * v2[1], v1[2] * v2[2]);
}
inline __device__ Vec3f_Device operator/(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] / v2[0], v1[1] / v2[1], v1[2] / v2[2]);
}
inline __device__ Vec3f_Device operator*(fp_t t, const Vec3f_Device &v) {
return Vec3f_Device(t * v[0], t * v[1], t * v[2]);
}
inline __device__ Vec3f_Device operator/(Vec3f_Device v, fp_t t) {
return Vec3f_Device(v[0] / t, v[1] / t, v[2] / t);
}
inline __device__ Vec3f_Device operator*(const Vec3f_Device &v, fp_t t) {
return Vec3f_Device(t * v[0], t * v[1], t * v[2]);
}
inline __device__ bool operator==(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return v1[0] == v2[0] && v1[1] == v2[1] && v1[2] == v2[2];
}
struct Ray_Device {
Vec3f_Device pos;
Vec3f_Device dir;
__device__ Vec3f_Device getPosition() const {
return pos;
}
__device__ Vec3f_Device getDirection() const {
return dir;
}
};
class Hitable_Sphere_Device {
// Data
public:
fp_t radius;
Vec3f_Device center;
// Operation
public:
__device__ bool intersect(Vec3f_Device &normal, fp_t *distance, const Ray_Device *ray, fp_t t_min, fp_t t_max) const {
Vec3f_Device oc = ray->pos - center;
fp_t a = ray->dir.dot(ray->dir);
fp_t b = oc.dot(ray->dir);
fp_t c = oc.dot(oc) - radius * radius;
fp_t discriminant = b * b - a * c;
// no intersection
if (discriminant <= 0) return false;
// find the length of the ray
// check if the ray is hit within the range
fp_t ray_length;
ray_length = (-b - sqrt(b * b - a * c)) / a;
if (ray_length < t_max && ray_length > t_min) goto RAY_HIT;
ray_length = (-b + sqrt(b * b - a * c)) / a;
if (ray_length < t_max && ray_length > t_min) goto RAY_HIT;
return false;
// ray hit the object within the range
// need to set the content of hit record
RAY_HIT:
*distance = ray_length;
normal = (ray->pos + ray_length * ray->dir).normalize();
return true;
}
};
class Hitable_Sphere {
// Data
public:
fp_t radius;
Vec3f center;
};
struct RecordRender {
int size_hitable;
int width;
int height;
};
// CUDA Function
// Multiply-with-carry hash RNG (classic smallpt-style): advances both seeds in
// place, combines them into 32 bits of state, and maps that to a float in
// [0, 1) via an IEEE-754 bit trick (mantissa bits OR'd under the exponent of
// 2.0f gives a value in [2, 4), then rescaled).
__device__ float get_random(unsigned int *seed0, unsigned int *seed1) {
*seed0 = 36969 * ((*seed0) & 65535) + ((*seed0) >> 16); // hash the seeds using bitwise AND and bitshifts
*seed1 = 18000 * ((*seed1) & 65535) + ((*seed1) >> 16);
unsigned int ires = ((*seed0) << 16) + (*seed1);
// convert to float
union {
float f;
unsigned int ui;
} res;
res.ui = (ires & 0x007fffff) | 0x40000000; // bitwise AND, bitwise OR
return (res.f - 2.f) / 2.f;
}
// Occlusion test: returns true if `ray` hits any of the `size_hitable` spheres
// in `scene`. The per-hit normal/distance are computed by intersect() but
// discarded here.
__device__ bool intersect_scene(const Ray_Device *ray, Hitable_Sphere_Device *scene, int size_hitable) {
for (int i = 0; i < size_hitable; ++i) {
Vec3f_Device normal;
fp_t distance;
// TODO: select a better t_max
if (!scene[i].intersect(normal, &distance, ray, 0, 8192.0f)) continue;
return true;
}
return false;
}
// __global__: executed on the device (GPU) and callable onbly from host (CPU)
// this kernel runs in parallel on all the CUDA threads
// Renders one pixel per thread: shoots an 8x8 grid of sub-pixel primary rays
// from a pinhole camera at the origin and accumulates a flat occlusion color
// (1/64 per hitting sample, so a fully covered pixel reaches 1.0).
// Expected launch: 2D grid x 2D blocks covering width x height pixels.
__global__ void render_kernel(Vec3f_Device *output, Hitable_Sphere_Device *scene, RecordRender *record_render) {
// assign a CUDA thread to every pixel (x, y)
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail: launch configurations whose grid does not divide the
// image exactly would otherwise write out of bounds.
if (x >= record_render->width || y >= record_render->height) return;
int i = y * record_render->width + x; // index of current pixel
Ray_Device ray;
ray.pos = Vec3f_Device(0.0f, 0.0f, 0.0f);
Vec3f_Device result;
// 8x8 supersampling. sx/sy are the sub-pixel offsets (renamed from i/j,
// which shadowed the pixel index above). Float literals keep the math in
// single precision on the device.
for (int sx = 0; sx < 8; ++sx) {
for (int sy = 0; sy < 8; ++sy) {
ray.dir = Vec3f_Device(
0.0f + ((float)x - (record_render->width / 2) + ((float)sx - 4.0f) / 8.0f) / (record_render->width),
0.0f + ((float)y - (record_render->height / 2) + ((float)sy - 4.0f) / 8.0f) / (record_render->height),
1.0f);
result += intersect_scene(&ray, scene, record_render->size_hitable) ? Vec3f_Device(1.0f / 64.0f, 1.0f / 64.0f, 1.0f / 64.0f) : Vec3f_Device();
}
}
output[i] = result;
}
// Host driver: builds a one-sphere scene, renders a 128x128 occlusion image on
// the GPU, and writes it out as an ASCII PPM ("result.ppm").
int main(void) {
// MARK: message
printf("Main started \n");
const int width = 128;
const int height = 128;
// data - output
printf("Data: Output \n");
// NOTE(review): Vec3f (host) and Vec3f_Device are assumed layout-compatible
// (the raw memcpy between them below depends on it) -- confirm.
Vec3f *output_host;
Vec3f_Device *output_device;
output_host = new Vec3f[width * height];
checkCudaErrors(hipMalloc(&output_device, width * height * sizeof(Vec3f_Device)));
// data - scene: one sphere of radius 0.5 at (0, 0, 2)
printf("Data: Scene \n");
Hitable_Sphere *hitable_host;
Hitable_Sphere_Device *hitable_device;
hitable_host = new Hitable_Sphere[1];
hitable_host[0].radius = 0.5;
hitable_host[0].center = Vec3f(0, 0, 2);
checkCudaErrors(hipMalloc(&hitable_device, 1 * sizeof(Hitable_Sphere_Device)));
checkCudaErrors(hipMemcpy(hitable_device, hitable_host, 1 * sizeof(Hitable_Sphere_Device), hipMemcpyHostToDevice));
// data - render parameters shared with the kernel
printf("Data: Render \n");
RecordRender record_render_host;
RecordRender *record_render_device;
record_render_host.size_hitable = 1;
record_render_host.width = width;
record_render_host.height = height;
checkCudaErrors(hipMalloc(&record_render_device, sizeof(RecordRender)));
checkCudaErrors(hipMemcpy(record_render_device, &record_render_host, sizeof(RecordRender), hipMemcpyHostToDevice));
// dim3 is CUDA specific type, block and grid are required to schedule CUDA threads over streaming multiprocessors
dim3 threads = dim3(8, 8);
dim3 blocks = dim3(width / threads.x, height / threads.y);
// MARK: messaging
printf("CUDA initiaized \n");
printf("Start rendering... \n");
// record time with events; the blocking memcpy below also synchronizes with
// the kernel before `stop` is recorded.
float gpu_time = 0.0f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( render_kernel), dim3(blocks), dim3(threads), 0, 0, output_device, hitable_device, record_render_device);
checkCudaErrors(hipGetLastError()); // catch launch-configuration errors
checkCudaErrors(hipMemcpy(output_host, output_device, width * height * sizeof(Vec3f_Device), hipMemcpyDeviceToHost));
hipEventRecord(stop);
hipEventSynchronize(stop);
// show time
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
printf("Time elapsed: %f \n", gpu_time);
hipEventDestroy(start);
hipEventDestroy(stop);
// free CUDA memory
checkCudaErrors(hipFree(output_device));
checkCudaErrors(hipFree(hitable_device));
checkCudaErrors(hipFree(record_render_device));
// MARK: messaging
printf("Done\n");
// write image to PPM file; check fopen and close the stream (the original
// leaked the FILE* and would crash on a NULL stream)
FILE *f = fopen("result.ppm", "w");
if (f == NULL) {
fprintf(stderr, "Cannot open 'result.ppm' for writing\n");
delete[] output_host;
delete[] hitable_host;
return 1;
}
fprintf(f, "P3\n%d %d\n%d\n", width, height, 255);
// loop over pixels, write RGB
for (int i = 0; i < width * height; ++i) {
fprintf(f, "%d %d %d \n",
(int)(output_host[i][0] * 255),
(int)(output_host[i][1] * 255),
(int)(output_host[i][2] * 255));
}
fclose(f);
printf("Saved image to 'result.ppm'\n");
delete[] output_host;
delete[] hitable_host;
printf("Done \n");
return 0;
}
| 162a5988303ac58d88a1576eea8695a091c3054b.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "RayTracer_Vec3f.hpp"
typedef float fp_t;
// typedef float3 Vec3f_Device;
template <class T>
inline __device__ T max(T x, T y) {
return x > y ? x : y;
}
template <class T>
inline __device__ T min(T x, T y) {
return x < y ? x : y;
}
// Data Structure
class Vec3f_Device {
// Data
public:
fp_t n[3];
// Operation
public:
// init
__device__ Vec3f_Device() {
n[0] = 0.0;
n[1] = 0.0;
n[2] = 0.0;
}
__device__ Vec3f_Device(fp_t n0) {
n[0] = n0;
n[1] = n0;
n[2] = n0;
}
__device__ Vec3f_Device(fp_t n0, fp_t n1, fp_t n2) {
n[0] = n0;
n[1] = n1;
n[2] = n2;
}
__device__ Vec3f_Device(const Vec3f_Device &v) {
n[0] = v.n[0];
n[1] = v.n[1];
n[2] = v.n[2];
}
// operator
__device__ fp_t operator [] ( int i ) const { return n[i]; }
__device__ fp_t& operator [] ( int i ) { return n[i]; }
__device__ const Vec3f_Device& operator + () const { return *this; }
__device__ Vec3f_Device operator - () const { return Vec3f_Device(-n[0], -n[1], -n[2]); }
__device__ Vec3f_Device& operator = ( const Vec3f_Device &v ) { n[0] = v.n[0]; n[1] = v.n[1]; n[2] = v.n[2]; return *this; }
__device__ Vec3f_Device& operator += ( const Vec3f_Device &v ) { n[0] += v.n[0]; n[1] += v.n[1]; n[2] += v.n[2]; return *this; }
__device__ Vec3f_Device& operator -= ( const Vec3f_Device &v ) { n[0] -= v.n[0]; n[1] -= v.n[1]; n[2] -= v.n[2]; return *this; }
__device__ Vec3f_Device& operator *= ( const Vec3f_Device &v ) { n[0] *= v.n[0]; n[1] *= v.n[1]; n[2] *= v.n[2]; return *this; }
__device__ Vec3f_Device& operator /= ( const Vec3f_Device &v ) { n[0] /= v.n[0]; n[1] /= v.n[1]; n[2] /= v.n[2]; return *this; }
__device__ Vec3f_Device& operator *= ( const fp_t d ) { n[0] *= d; n[1] *= d; n[2] *= d; return *this; }
__device__ Vec3f_Device& operator /= ( const fp_t d ) { n[0] /= d; n[1] /= d; n[2] /= d; return *this; }
// Math Operation
// TODO: clamp (0.0 <= n[i] <= 1.0)
Vec3f_Device __device__ clamp(fp_t val_min, fp_t val_max) const {
return Vec3f_Device(
max(val_min, min(n[0], val_max)),
max(val_min, min(n[1], val_max)),
max(val_min, min(n[2], val_max)));
}
bool __device__ isZero() const {
return n[0] == 0 && n[1] == 0 && n[2] == 0;
}
Vec3f_Device __device__ cross(const Vec3f_Device &v) const {
return Vec3f_Device(
n[1] * v[2] - n[2] * v[1],
n[2] * v[0] - n[0] * v[2],
n[0] * v[1] - n[1] * v[0] );
}
fp_t __device__ dot(const Vec3f_Device &v) const {
return n[0] * v[0] + n[1] * v[1] + n[2] * v[2];
}
Vec3f_Device __device__ prod(const Vec3f_Device &v) const {
return Vec3f_Device(n[0] * v[0], n[1] * v[1], n[2] * v[2]);
}
// reference
// 1. https://en.wikipedia.org/wiki/Vector_projection
fp_t __device__ projectLength(const Vec3f_Device &v) const {
fp_t dot_result = dot(v);
fp_t length_v2 = v.lengthSquared();
return dot_result / length_v2;
}
// reference
// 1. https://en.wikipedia.org/wiki/Vector_projection
// projection of a on b = a.dot(b) / b.lengthSquared() * b
Vec3f_Device __device__ projectOn(const Vec3f_Device &v) const {
fp_t length = projectLength(v);
return Vec3f_Device(length * v[0], length * v[1], length * v[2]);
}
Vec3f_Device __device__ normalize() const {
Vec3f_Device ret (*this);
ret /= length();
return ret;
}
fp_t __device__ length() const {
return sqrt(lengthSquared());
}
fp_t __device__ lengthSquared() const {
return n[0] * n[0] + n[1] * n[1] + n[2] * n[2];
}
};
inline __device__ Vec3f_Device operator+(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2]);
}
inline __device__ Vec3f_Device operator-(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2]);
}
inline __device__ Vec3f_Device operator*(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] * v2[0], v1[1] * v2[1], v1[2] * v2[2]);
}
inline __device__ Vec3f_Device operator/(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return Vec3f_Device(v1[0] / v2[0], v1[1] / v2[1], v1[2] / v2[2]);
}
inline __device__ Vec3f_Device operator*(fp_t t, const Vec3f_Device &v) {
return Vec3f_Device(t * v[0], t * v[1], t * v[2]);
}
inline __device__ Vec3f_Device operator/(Vec3f_Device v, fp_t t) {
return Vec3f_Device(v[0] / t, v[1] / t, v[2] / t);
}
inline __device__ Vec3f_Device operator*(const Vec3f_Device &v, fp_t t) {
return Vec3f_Device(t * v[0], t * v[1], t * v[2]);
}
inline __device__ bool operator==(const Vec3f_Device &v1, const Vec3f_Device &v2) {
return v1[0] == v2[0] && v1[1] == v2[1] && v1[2] == v2[2];
}
struct Ray_Device {
Vec3f_Device pos;
Vec3f_Device dir;
__device__ Vec3f_Device getPosition() const {
return pos;
}
__device__ Vec3f_Device getDirection() const {
return dir;
}
};
// Device-side sphere primitive, hit-tested with the standard quadratic
// ray/sphere intersection (with the b' = b/2 simplification).
class Hitable_Sphere_Device {
// Data
public:
fp_t radius;
Vec3f_Device center;
// Operation
public:
// Returns true when `ray` hits the sphere at a parameter t in (t_min, t_max);
// on a hit, *distance receives t. NOTE(review): `normal` is computed from the
// hit point relative to the ray origin, not (hit - center) / radius as a
// sphere surface normal normally would be -- this only coincides with a real
// normal for special configurations; confirm before reusing this class.
__device__ bool intersect(Vec3f_Device &normal, fp_t *distance, const Ray_Device *ray, fp_t t_min, fp_t t_max) const {
Vec3f_Device oc = ray->pos - center;
// Quadratic coefficients: a*t^2 + 2*b*t + c = 0.
fp_t a = ray->dir.dot(ray->dir);
fp_t b = oc.dot(ray->dir);
fp_t c = oc.dot(oc) - radius * radius;
fp_t discriminant = b * b - a * c;
// no intersection
if (discriminant <= 0) return false;
// find the length of the ray
// check if the ray is hit within the range
fp_t ray_length;
// Try the nearer root first, then the farther one.
ray_length = (-b - sqrt(b * b - a * c)) / a;
if (ray_length < t_max && ray_length > t_min) goto RAY_HIT;
ray_length = (-b + sqrt(b * b - a * c)) / a;
if (ray_length < t_max && ray_length > t_min) goto RAY_HIT;
return false;
// ray hit the object within the range
// need to set the content of hit record
RAY_HIT:
*distance = ray_length;
normal = (ray->pos + ray_length * ray->dir).normalize();
return true;
}
};
class Hitable_Sphere {
// Data
public:
fp_t radius;
Vec3f center;
};
struct RecordRender {
int size_hitable;
int width;
int height;
};
// CUDA Function
// smallpt-style multiply-with-carry RNG: mutates both seeds, then converts the
// combined 32-bit state into a uniform float in [0, 1) with an IEEE-754 bit
// trick (pack mantissa bits under the exponent of 2.0f, giving [2, 4), then
// rescale).
__device__ float get_random(unsigned int *seed0, unsigned int *seed1) {
unsigned int s0 = *seed0;
unsigned int s1 = *seed1;
s0 = 36969 * (s0 & 65535) + (s0 >> 16);
s1 = 18000 * (s1 & 65535) + (s1 >> 16);
*seed0 = s0;
*seed1 = s1;
unsigned int combined = (s0 << 16) + s1;
union {
float f;
unsigned int ui;
} bits;
bits.ui = (combined & 0x007fffff) | 0x40000000; // value in [2.0f, 4.0f)
return (bits.f - 2.f) / 2.f;
}
// Occlusion test: does `ray` hit any of the `size_hitable` spheres in `scene`?
// The hit normal/distance filled in by intersect() are thrown away here.
__device__ bool intersect_scene(const Ray_Device *ray, Hitable_Sphere_Device *scene, int size_hitable) {
Vec3f_Device unused_normal;
fp_t unused_distance;
int idx = 0;
while (idx < size_hitable) {
// TODO: select a better t_max
if (scene[idx].intersect(unused_normal, &unused_distance, ray, 0, 8192.0f)) {
return true;
}
++idx;
}
return false;
}
// __global__: executed on the device (GPU) and callable onbly from host (CPU)
// this kernel runs in parallel on all the CUDA threads
// Renders one pixel per thread: shoots an 8x8 grid of sub-pixel primary rays
// from a pinhole camera at the origin and accumulates a flat occlusion color
// (1/64 per hitting sample, so a fully covered pixel reaches 1.0).
// Expected launch: 2D grid x 2D blocks covering width x height pixels.
__global__ void render_kernel(Vec3f_Device *output, Hitable_Sphere_Device *scene, RecordRender *record_render) {
// assign a CUDA thread to every pixel (x, y)
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail: launch configurations whose grid does not divide the
// image exactly would otherwise write out of bounds.
if (x >= record_render->width || y >= record_render->height) return;
int i = y * record_render->width + x; // index of current pixel
Ray_Device ray;
ray.pos = Vec3f_Device(0.0f, 0.0f, 0.0f);
Vec3f_Device result;
// 8x8 supersampling. sx/sy are the sub-pixel offsets (renamed from i/j,
// which shadowed the pixel index above). Float literals keep the math in
// single precision on the device.
for (int sx = 0; sx < 8; ++sx) {
for (int sy = 0; sy < 8; ++sy) {
ray.dir = Vec3f_Device(
0.0f + ((float)x - (record_render->width / 2) + ((float)sx - 4.0f) / 8.0f) / (record_render->width),
0.0f + ((float)y - (record_render->height / 2) + ((float)sy - 4.0f) / 8.0f) / (record_render->height),
1.0f);
result += intersect_scene(&ray, scene, record_render->size_hitable) ? Vec3f_Device(1.0f / 64.0f, 1.0f / 64.0f, 1.0f / 64.0f) : Vec3f_Device();
}
}
output[i] = result;
}
// Host driver: builds a one-sphere scene, renders a 128x128 occlusion image on
// the GPU, and writes it out as an ASCII PPM ("result.ppm").
int main(void) {
// MARK: message
printf("Main started \n");
const int width = 128;
const int height = 128;
// data - output
printf("Data: Output \n");
// NOTE(review): Vec3f (host) and Vec3f_Device are assumed layout-compatible
// (the raw memcpy between them below depends on it) -- confirm.
Vec3f *output_host;
Vec3f_Device *output_device;
output_host = new Vec3f[width * height];
checkCudaErrors(cudaMalloc(&output_device, width * height * sizeof(Vec3f_Device)));
// data - scene: one sphere of radius 0.5 at (0, 0, 2)
printf("Data: Scene \n");
Hitable_Sphere *hitable_host;
Hitable_Sphere_Device *hitable_device;
hitable_host = new Hitable_Sphere[1];
hitable_host[0].radius = 0.5;
hitable_host[0].center = Vec3f(0, 0, 2);
checkCudaErrors(cudaMalloc(&hitable_device, 1 * sizeof(Hitable_Sphere_Device)));
checkCudaErrors(cudaMemcpy(hitable_device, hitable_host, 1 * sizeof(Hitable_Sphere_Device), cudaMemcpyHostToDevice));
// data - render parameters shared with the kernel
printf("Data: Render \n");
RecordRender record_render_host;
RecordRender *record_render_device;
record_render_host.size_hitable = 1;
record_render_host.width = width;
record_render_host.height = height;
checkCudaErrors(cudaMalloc(&record_render_device, sizeof(RecordRender)));
checkCudaErrors(cudaMemcpy(record_render_device, &record_render_host, sizeof(RecordRender), cudaMemcpyHostToDevice));
// dim3 is CUDA specific type, block and grid are required to schedule CUDA threads over streaming multiprocessors
dim3 threads = dim3(8, 8);
dim3 blocks = dim3(width / threads.x, height / threads.y);
// MARK: messaging
printf("CUDA initiaized \n");
printf("Start rendering... \n");
// record time with events; the blocking memcpy below also synchronizes with
// the kernel before `stop` is recorded.
float gpu_time = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
render_kernel<<<blocks, threads>>>(output_device, hitable_device, record_render_device);
checkCudaErrors(cudaGetLastError()); // catch launch-configuration errors
checkCudaErrors(cudaMemcpy(output_host, output_device, width * height * sizeof(Vec3f_Device), cudaMemcpyDeviceToHost));
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// show time
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
printf("Time elapsed: %f \n", gpu_time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// free CUDA memory
checkCudaErrors(cudaFree(output_device));
checkCudaErrors(cudaFree(hitable_device));
checkCudaErrors(cudaFree(record_render_device));
// MARK: messaging
printf("Done\n");
// write image to PPM file; check fopen and close the stream (the original
// leaked the FILE* and would crash on a NULL stream)
FILE *f = fopen("result.ppm", "w");
if (f == NULL) {
fprintf(stderr, "Cannot open 'result.ppm' for writing\n");
delete[] output_host;
delete[] hitable_host;
return 1;
}
fprintf(f, "P3\n%d %d\n%d\n", width, height, 255);
// loop over pixels, write RGB
for (int i = 0; i < width * height; ++i) {
fprintf(f, "%d %d %d \n",
(int)(output_host[i][0] * 255),
(int)(output_host[i][1] * 255),
(int)(output_host[i][2] * 255));
}
fclose(f);
printf("Saved image to 'result.ppm'\n");
delete[] output_host;
delete[] hitable_host;
printf("Done \n");
return 0;
}
|
00cafd25104071658165380e7b05ac1844f0c2d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Hello World in CUDA
*
* CS3210
*
* This program start from "hello world" string and should print "HELLO WORLD"
*
*/
#include <stdio.h>
#define N 32
// #define DISCRETE
// Shifts a[0..len-1] from lowercase to uppercase in place, one thread per
// character. The fixed offset 'A' - 'a' (-32) also maps '@' (64) to ' ' (32),
// which is how the host string's separator becomes a space.
__global__ void hello(char *a, int len)
{
int tid = threadIdx.x;
// threads beyond the string length do nothing
if (tid >= len)
return;
a[tid] += 'A' - 'a';
}
// Uploads "hello@world", uppercases it on the GPU (one thread per byte),
// copies it back, and prints "HELLO WORLD!".
// NOTE(review): strlen() requires <string.h>, which this file does not include
// (only <stdio.h> is visible) -- presumably compiles via a transitive include;
// confirm/add the include.
int main()
{
// original string
char a[N] = "hello@world";
// length
int len = strlen(a);
// pointer to the string on device
char* ad;
// pointer to the final string on host
char* ah;
// CUDA returned error code
hipError_t rc;
//allocate space for the string on device (GPU) memory
hipMalloc((void**)&ad, N);
hipMemcpy(ad, a, N, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( hello), dim3(1), dim3(N), 0, 0, ad, len);
hipDeviceSynchronize();
// for discrete GPUs, get the data from device memory to host memory
hipMemcpy(a, ad, N, hipMemcpyDeviceToHost);
ah = a;
// was there any error?
rc = hipGetLastError();
if (rc != hipSuccess)
printf("Last CUDA error %s\n", hipGetErrorString(rc));
// print final string
printf("%s!\n", ah);
// free memory
hipFree(ad);
return 0;
}
| 00cafd25104071658165380e7b05ac1844f0c2d6.cu | /*
* Hello World in CUDA
*
* CS3210
*
* This program start from "hello world" string and should print "HELLO WORLD"
*
*/
#include <stdio.h>
#define N 32
// #define DISCRETE
// Shifts a[0..len-1] from lowercase to uppercase in place, one thread per
// character. The fixed offset 'A' - 'a' (-32) also maps '@' (64) to ' ' (32),
// which is how the host string's separator becomes a space.
__global__ void hello(char *a, int len)
{
int tid = threadIdx.x;
// threads beyond the string length do nothing
if (tid >= len)
return;
a[tid] += 'A' - 'a';
}
// Uploads "hello@world", uppercases it on the GPU (one thread per byte),
// copies it back, and prints "HELLO WORLD!".
// NOTE(review): strlen() requires <string.h>, which this file does not include
// (only <stdio.h> is visible) -- presumably compiles via a transitive include;
// confirm/add the include.
int main()
{
// original string
char a[N] = "hello@world";
// length
int len = strlen(a);
// pointer to the string on device
char* ad;
// pointer to the final string on host
char* ah;
// CUDA returned error code
cudaError_t rc;
//allocate space for the string on device (GPU) memory
cudaMalloc((void**)&ad, N);
cudaMemcpy(ad, a, N, cudaMemcpyHostToDevice);
// launch the kernel
hello<<<1, N>>>(ad, len);
cudaDeviceSynchronize();
// for discrete GPUs, get the data from device memory to host memory
cudaMemcpy(a, ad, N, cudaMemcpyDeviceToHost);
ah = a;
// was there any error?
rc = cudaGetLastError();
if (rc != cudaSuccess)
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
// print final string
printf("%s!\n", ah);
// free memory
cudaFree(ad);
return 0;
}
|
Test_doRestriction_GPU.hip | // !!! This is a file automatically generated by hipify!!!
/*
nvcc -arch=compute_52 -code=sm_52 -O3 --compiler-options -fopenmp Test_doRestriction_GPU.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <hip/hip_runtime.h>
__align__(8) texture<float> texMem_float;
// Resamples the N x N source grid (bound to texMem_float by the caller) onto
// the M x M destination grid U_m via bilinear interpolation. h_n / h_m are the
// source / destination grid spacings. Each destination point is handled by one
// iteration of a grid-stride loop (index_m += blockDim.x * gridDim.x), so any
// launch configuration works. Boundary points of U_m are skipped; the caller
// is expected to zero-fill them.
__global__ void ker_Zoom_GPU(int N, float h_n, int M, float h_m, float *U_m){
int index_m = blockDim.x * blockIdx.x + threadIdx.x;
int index_n;
int ix_m, iy_m;
int ix_n, iy_n;
float a, c; // the ratio of the coarse grid point to the first met lower left fine grid index in x-dir, y-dir
// Should be between 0 <= a,c < 1
float b, d; // ratio
float bl, br, tl, tr; // value of the bottom left/right and top left/right
while( index_m < M*M ){
// Parse the index_m
ix_m = index_m % M;
iy_m = index_m / M;
// Ignore the boundary
if( (ix_m == 0) || (ix_m == M-1) || (iy_m == 0) || (iy_m == M-1) ){
// Do nothing
}
else{
// Calculate the ratio and the lower left grid_n index
ix_n = (int) floorf((float)ix_m * h_m / h_n);
iy_n = (int) floorf((float)iy_m * h_m / h_n);
index_n = ix_n + iy_n * N;
a = fmodf((float)ix_m * h_m, h_n) / h_n;
c = fmodf((float)iy_m * h_m, h_n) / h_n;
b = 1.0 - a;
d = 1.0 - c;
// Fetch the four surrounding source values through the texture
bl = tex1Dfetch(texMem_float, index_n);
br = tex1Dfetch(texMem_float, index_n + 1);
tl = tex1Dfetch(texMem_float, index_n + N);
tr = tex1Dfetch(texMem_float, index_n + N + 1);
// Bilinear blend, stored into U_m
U_m[index_m] = b * d * bl + a * d * br + c * b * tl + a * c * tr;
}
// Stride
index_m = index_m + blockDim.x * gridDim.x;
}
}
// GPU restriction: converts the N x N fine grid U_f to float, uploads it,
// binds it to the (legacy, deprecated) texture reference texMem_float, zooms
// it down to M x M with ker_Zoom_GPU, and converts the result back to double
// in U_c. NOTE(review): no CUDA error checking anywhere; the fixed 10x10
// launch relies on the kernel's grid-stride loop; hipMemset's fill argument is
// written as 0.0 (a double) -- it converts to int 0 so the zero-fill is
// correct, but the literal should be 0.
void doRestriction_GPU(int N, double *U_f, int M, double *U_c){
// Settings
double h_f = 1.0 / (double) (N - 1); // spacing in finer grid
double h_c = 1.0 / (double) (M - 1); // spacing in coarser grid
// Settings for GPU
int blocksPerGrid = 10;
int threadsPerBlock = 10;
float *d_Uf, *d_Uc;
float *h_Uf, *h_Uc;
/*
CPU Part
*/
// Allocate host memory
h_Uf = (float*) malloc(N * N * sizeof(float));
h_Uc = (float*) malloc(M * M * sizeof(float));
// Transfer data from double to float
# pragma omp parallel for
for(int i = 0; i < N*N; i = i+1){
h_Uf[i] = (float) U_f[i];
}
/*
GPU Part
*/
// Allocate device memory
hipMalloc((void**)&d_Uf, N * N * sizeof(float));
hipMalloc((void**)&d_Uc, M * M * sizeof(float));
// Bind d_Uf to texture memory
hipBindTexture(NULL, texMem_float, d_Uf, N * N * sizeof(float));
// Copy data to device memory and initialize d_Uc as zeros
hipMemcpy(d_Uf, h_Uf, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemset(d_Uc, 0.0, M * M * sizeof(float));
free(h_Uf); // h_Uf is no longer needed
// Call the kernel
hipLaunchKernelGGL(( ker_Zoom_GPU) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, N, (float)h_f, M, (float)h_c, d_Uc);
// Copy data back to host memory
hipMemcpy(h_Uc, d_Uc, M * M * sizeof(float), hipMemcpyDeviceToHost);
// Unbind texture memory and free the device memory
hipUnbindTexture(texMem_float);
hipFree(d_Uf);
hipFree(d_Uc);
// Transfer data from float to double
# pragma omp parallel for
for(int i = 0; i < M*M; i = i+1){
U_c[i] = (double) h_Uc[i];
}
free(h_Uc);
}
// CPU reference restriction: resamples the N x N fine grid U_f onto the
// M x M coarse grid U_c by bilinear interpolation on the unit square
// (fine spacing 1/(N-1), coarse spacing 1/(M-1)). Boundary rows/columns of
// U_c are left at zero.
void doRestriction(int N, double *U_f, int M, double *U_c){
double fine_h = 1.0 / (double)(N - 1); // fine-grid spacing
double coarse_h = 1.0 / (double)(M - 1); // coarse-grid spacing
// Zero the whole coarse grid so the untouched boundary stays 0.
memset(U_c, 0, M * M * sizeof(double));
// Interior points only; each is interpolated from the four fine-grid
// neighbours of its physical location.
# pragma omp for
for(int cy = 1; cy < (M-1); cy = cy+1){
for(int cx = 1; cx < (M-1); cx = cx+1){
// Lower-left fine-grid neighbour of coarse point (cx, cy).
int fx = (int) floor((double)cx * coarse_h / fine_h);
int fy = (int) floor((double)cy * coarse_h / fine_h);
// Fractional position inside the fine cell, in [0, 1).
double wx = fmod((double)cx * coarse_h, fine_h) / fine_h;
double wy = fmod((double)cy * coarse_h, fine_h) / fine_h;
double one_minus_wx = 1.0 - wx;
double one_minus_wy = 1.0 - wy;
int fine_index = fx + fy * N;
// Bilinear blend of the four surrounding fine-grid values.
U_c[cx + cy * M] = one_minus_wx * one_minus_wy * U_f[fine_index]
+ wx * one_minus_wy * U_f[fine_index + 1]
+ wy * one_minus_wx * U_f[fine_index + N]
+ wx * wy * U_f[fine_index + N + 1];
}
}
}
// Prints the N x N grid U row by row, top row (j = N-1) first, so the console
// output matches the y-up orientation of the grid.
void doPrint(int N, double *U){
for(int row = N-1; row >= 0; --row){
int base = row * N;
for(int col = 0; col < N; ++col){
printf("%2.3e ", U[base + col]);
}
printf("\n");
}
}
// Driver: fills an N x N fine grid with i+j, runs the GPU restriction and the
// CPU reference to an M x M coarse grid, prints both results and timings, and
// reports the mean absolute difference.
int main( int argc, char *argv[] ){
int N, M;
double *Uf, *Uc;
double *Uc_CPU;
// N = fine-grid side, M = coarse-grid side (defaults 16 / 8).
if(argc != 3){
N = 16;
M = 8;
}
else{
N = atoi(argv[1]);
M = atoi(argv[2]);
}
Uf = (double*) malloc(N * N * sizeof(double));
Uc = (double*) malloc(M * M * sizeof(double));
Uc_CPU = (double*) malloc(M * M * sizeof(double));
printf("~ Initialize the test doRestriction_GPU ~\n");
for(int i = 0; i < N; i = i+1){
for(int j = 0; j < N; j = j+1){
Uf[i + N*j] = (double) (i + j);
}
}
printf("Uf = \n");
doPrint(N, Uf);
hipEvent_t start, stop;
hipError_t err;
err = hipSetDevice( 0 );
if(err != hipSuccess){
printf("Cannot select GPU\n");
exit(1);
}
float gpu_time_use;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
printf("~Run the test~\n");
doRestriction_GPU(N, Uf, M, Uc);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_time_use, start, stop);
printf("Uc = \n");
doPrint(M, Uc);
printf("GPU: TimeUsed = %lf\n", gpu_time_use);
printf("\n");
float cpu_time_use;
hipEventRecord(start, 0);
doRestriction(N, Uf, M, Uc_CPU);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cpu_time_use, start, stop);
printf("Uc_CPU = \n");
doPrint(M, Uc_CPU);
printf("CPU: TimeUsed = %lf\n", cpu_time_use);
printf("SpeedUp = %lf\n", cpu_time_use / gpu_time_use);
double diff = 0.0;
// Mean absolute difference. reduction(+:diff) is required here: the original
// "#pragma omp parallel for" raced on the shared accumulator. The branchless
// abs below avoids the integer abs() overload, which would truncate doubles
// (<cmath>/fabs is not included in this file).
# pragma omp parallel for reduction(+:diff)
for(int i = 0; i < M*M; i = i+1){
double d = Uc[i] - Uc_CPU[i];
diff = diff + (d < 0.0 ? -d : d);
}
printf("norm(U - U_CPU) = %lf\n", diff / (double)(M*M));
// release timing events and host buffers (the original leaked all of these)
hipEventDestroy(start);
hipEventDestroy(stop);
free(Uf);
free(Uc);
free(Uc_CPU);
return 0;
}
| Test_doRestriction_GPU.cu | /*
nvcc -arch=compute_52 -code=sm_52 -O3 --compiler-options -fopenmp Test_doRestriction_GPU.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <cuda_runtime.h>
__align__(8) texture<float> texMem_float;
// Resamples the N x N source grid (bound to texMem_float by the caller) onto
// the M x M destination grid U_m via bilinear interpolation. h_n / h_m are the
// source / destination grid spacings. Each destination point is handled by one
// iteration of a grid-stride loop (index_m += blockDim.x * gridDim.x), so any
// launch configuration works. Boundary points of U_m are skipped; the caller
// is expected to zero-fill them.
__global__ void ker_Zoom_GPU(int N, float h_n, int M, float h_m, float *U_m){
int index_m = blockDim.x * blockIdx.x + threadIdx.x;
int index_n;
int ix_m, iy_m;
int ix_n, iy_n;
float a, c; // the ratio of the coarse grid point to the first met lower left fine grid index in x-dir, y-dir
// Should be between 0 <= a,c < 1
float b, d; // ratio
float bl, br, tl, tr; // value of the bottom left/right and top left/right
while( index_m < M*M ){
// Parse the index_m
ix_m = index_m % M;
iy_m = index_m / M;
// Ignore the boundary
if( (ix_m == 0) || (ix_m == M-1) || (iy_m == 0) || (iy_m == M-1) ){
// Do nothing
}
else{
// Calculate the ratio and the lower left grid_n index
ix_n = (int) floorf((float)ix_m * h_m / h_n);
iy_n = (int) floorf((float)iy_m * h_m / h_n);
index_n = ix_n + iy_n * N;
a = fmodf((float)ix_m * h_m, h_n) / h_n;
c = fmodf((float)iy_m * h_m, h_n) / h_n;
b = 1.0 - a;
d = 1.0 - c;
// Fetch the four surrounding source values through the texture
bl = tex1Dfetch(texMem_float, index_n);
br = tex1Dfetch(texMem_float, index_n + 1);
tl = tex1Dfetch(texMem_float, index_n + N);
tr = tex1Dfetch(texMem_float, index_n + N + 1);
// Bilinear blend, stored into U_m
U_m[index_m] = b * d * bl + a * d * br + c * b * tl + a * c * tr;
}
// Stride
index_m = index_m + blockDim.x * gridDim.x;
}
}
// GPU restriction: converts the N x N fine grid U_f to float, uploads it,
// binds it to the (legacy, deprecated) texture reference texMem_float, zooms
// it down to M x M with ker_Zoom_GPU, and converts the result back to double
// in U_c. NOTE(review): no CUDA error checking anywhere; the fixed 10x10
// launch relies on the kernel's grid-stride loop; cudaMemset's fill argument
// is written as 0.0 (a double) -- it converts to int 0 so the zero-fill is
// correct, but the literal should be 0.
void doRestriction_GPU(int N, double *U_f, int M, double *U_c){
// Settings
double h_f = 1.0 / (double) (N - 1); // spacing in finer grid
double h_c = 1.0 / (double) (M - 1); // spacing in coarser grid
// Settings for GPU
int blocksPerGrid = 10;
int threadsPerBlock = 10;
float *d_Uf, *d_Uc;
float *h_Uf, *h_Uc;
/*
CPU Part
*/
// Allocate host memory
h_Uf = (float*) malloc(N * N * sizeof(float));
h_Uc = (float*) malloc(M * M * sizeof(float));
// Transfer data from double to float
# pragma omp parallel for
for(int i = 0; i < N*N; i = i+1){
h_Uf[i] = (float) U_f[i];
}
/*
GPU Part
*/
// Allocate device memory
cudaMalloc((void**)&d_Uf, N * N * sizeof(float));
cudaMalloc((void**)&d_Uc, M * M * sizeof(float));
// Bind d_Uf to texture memory
cudaBindTexture(NULL, texMem_float, d_Uf, N * N * sizeof(float));
// Copy data to device memory and initialize d_Uc as zeros
cudaMemcpy(d_Uf, h_Uf, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(d_Uc, 0.0, M * M * sizeof(float));
free(h_Uf); // h_Uf is no longer needed
// Call the kernel
ker_Zoom_GPU <<< blocksPerGrid, threadsPerBlock >>> (N, (float)h_f, M, (float)h_c, d_Uc);
// Copy data back to host memory
cudaMemcpy(h_Uc, d_Uc, M * M * sizeof(float), cudaMemcpyDeviceToHost);
// Unbind texture memory and free the device memory
cudaUnbindTexture(texMem_float);
cudaFree(d_Uf);
cudaFree(d_Uc);
// Transfer data from float to double
# pragma omp parallel for
for(int i = 0; i < M*M; i = i+1){
U_c[i] = (double) h_Uc[i];
}
free(h_Uc);
}
// CPU reference restriction: resamples the N x N fine grid U_f onto the
// M x M coarse grid U_c by bilinear interpolation on the unit square
// (fine spacing 1/(N-1), coarse spacing 1/(M-1)). Boundary rows/columns of
// U_c are left at zero.
void doRestriction(int N, double *U_f, int M, double *U_c){
double fine_h = 1.0 / (double)(N - 1); // fine-grid spacing
double coarse_h = 1.0 / (double)(M - 1); // coarse-grid spacing
// Zero the whole coarse grid so the untouched boundary stays 0.
memset(U_c, 0, M * M * sizeof(double));
// Interior points only; each is interpolated from the four fine-grid
// neighbours of its physical location.
# pragma omp for
for(int cy = 1; cy < (M-1); cy = cy+1){
for(int cx = 1; cx < (M-1); cx = cx+1){
// Lower-left fine-grid neighbour of coarse point (cx, cy).
int fx = (int) floor((double)cx * coarse_h / fine_h);
int fy = (int) floor((double)cy * coarse_h / fine_h);
// Fractional position inside the fine cell, in [0, 1).
double wx = fmod((double)cx * coarse_h, fine_h) / fine_h;
double wy = fmod((double)cy * coarse_h, fine_h) / fine_h;
double one_minus_wx = 1.0 - wx;
double one_minus_wy = 1.0 - wy;
int fine_index = fx + fy * N;
// Bilinear blend of the four surrounding fine-grid values.
U_c[cx + cy * M] = one_minus_wx * one_minus_wy * U_f[fine_index]
+ wx * one_minus_wy * U_f[fine_index + 1]
+ wy * one_minus_wx * U_f[fine_index + N]
+ wx * wy * U_f[fine_index + N + 1];
}
}
}
// Prints the N x N grid U row by row, top row (j = N-1) first, so the console
// output matches the y-up orientation of the grid.
void doPrint(int N, double *U){
for(int j = N-1; j >= 0; j = j-1){
for(int i = 0; i < N; i = i+1){
printf("%2.3e ", U[i+N*j]);
}
printf("\n");
}
}
// Driver: fills an N x N fine grid with i+j, runs the GPU restriction and the
// CPU reference to an M x M coarse grid, prints both results and timings, and
// reports the mean absolute difference.
int main( int argc, char *argv[] ){
int N, M;
double *Uf, *Uc;
double *Uc_CPU;
// N = fine-grid side, M = coarse-grid side (defaults 16 / 8).
if(argc != 3){
N = 16;
M = 8;
}
else{
N = atoi(argv[1]);
M = atoi(argv[2]);
}
Uf = (double*) malloc(N * N * sizeof(double));
Uc = (double*) malloc(M * M * sizeof(double));
Uc_CPU = (double*) malloc(M * M * sizeof(double));
printf("~ Initialize the test doRestriction_GPU ~\n");
for(int i = 0; i < N; i = i+1){
for(int j = 0; j < N; j = j+1){
Uf[i + N*j] = (double) (i + j);
}
}
printf("Uf = \n");
doPrint(N, Uf);
cudaEvent_t start, stop;
cudaError_t err;
err = cudaSetDevice( 0 );
if(err != cudaSuccess){
printf("Cannot select GPU\n");
exit(1);
}
float gpu_time_use;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
printf("~Run the test~\n");
doRestriction_GPU(N, Uf, M, Uc);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time_use, start, stop);
printf("Uc = \n");
doPrint(M, Uc);
printf("GPU: TimeUsed = %lf\n", gpu_time_use);
printf("\n");
float cpu_time_use;
cudaEventRecord(start, 0);
doRestriction(N, Uf, M, Uc_CPU);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cpu_time_use, start, stop);
printf("Uc_CPU = \n");
doPrint(M, Uc_CPU);
printf("CPU: TimeUsed = %lf\n", cpu_time_use);
printf("SpeedUp = %lf\n", cpu_time_use / gpu_time_use);
double diff = 0.0;
// Mean absolute difference. reduction(+:diff) is required here: the original
// "#pragma omp parallel for" raced on the shared accumulator. The branchless
// abs below avoids the integer abs() overload, which would truncate doubles
// (<cmath>/fabs is not included in this file).
# pragma omp parallel for reduction(+:diff)
for(int i = 0; i < M*M; i = i+1){
double d = Uc[i] - Uc_CPU[i];
diff = diff + (d < 0.0 ? -d : d);
}
printf("norm(U - U_CPU) = %lf\n", diff / (double)(M*M));
// release timing events and host buffers (the original leaked all of these)
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(Uf);
free(Uc);
free(Uc_CPU);
return 0;
}
|
7fded2a6d3b3f1fc5bf6c58f3ba500bcb8c487d7.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| 7fded2a6d3b3f1fc5bf6c58f3ba500bcb8c487d7.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
5945567ff6ec7dd623eacb1e92d584568d734552.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define RADIUS 3
#define BLK_SIZE 256
#define NUM_ELEMENTS (BLK_SIZE * 32) // 256 * 32 = 8192
__global__ void stencil_1d(int *d_in, int *d_out){
int gindex = (blockIdx.x * blockDim.x) + threadIdx.x + RADIUS;
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++)
result += d_in[gindex + offset];
d_out[gindex - RADIUS] = result;
}
int main(void){
int h_in[ NUM_ELEMENTS + (2*RADIUS) ];
int h_out[ NUM_ELEMENTS ];
int *d_in, *d_out;
// Initialize host input values
for (int i=0; i<(NUM_ELEMENTS + 2*RADIUS); i++)
h_in[i] = 1;
// Allocate device global memory
hipMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int) );
hipMalloc( &d_out, NUM_ELEMENTS * sizeof(int) );
// Copy HOST -> DEVICE
hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice);
// Launch kernel
hipLaunchKernelGGL(( stencil_1d), dim3(NUM_ELEMENTS/BLK_SIZE), dim3(BLK_SIZE), 0, 0, d_in, d_out);
// NUM_ELEMENTS / BLK_SIZE = 8192 / 256 = 32
// BLK_SIZE = 256
// Copy result DEVICE -> HOST
hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), hipMemcpyDeviceToHost);
// Verify results
int err_cnt = 0;
for (int i=0; i<NUM_ELEMENTS; i++){
if (h_out[i] != 7){
printf("h_out[%d] == %d != 7\n", i, h_out[i]);
err_cnt++;
break;
}
}
if (err_cnt!=0){
printf("Wrong result\n");
}else{
printf("Success\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 5945567ff6ec7dd623eacb1e92d584568d734552.cu | #include <stdio.h>
#include <stdlib.h>
#define RADIUS 3
#define BLK_SIZE 256
#define NUM_ELEMENTS (BLK_SIZE * 32) // 256 * 32 = 8192
__global__ void stencil_1d(int *d_in, int *d_out){
int gindex = (blockIdx.x * blockDim.x) + threadIdx.x + RADIUS;
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++)
result += d_in[gindex + offset];
d_out[gindex - RADIUS] = result;
}
int main(void){
int h_in[ NUM_ELEMENTS + (2*RADIUS) ];
int h_out[ NUM_ELEMENTS ];
int *d_in, *d_out;
// Initialize host input values
for (int i=0; i<(NUM_ELEMENTS + 2*RADIUS); i++)
h_in[i] = 1;
// Allocate device global memory
cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int) );
cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int) );
// Copy HOST -> DEVICE
cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice);
// Launch kernel
stencil_1d<<< NUM_ELEMENTS/BLK_SIZE, BLK_SIZE>>>(d_in, d_out);
// NUM_ELEMENTS / BLK_SIZE = 8192 / 256 = 32
// BLK_SIZE = 256
// Copy result DEVICE -> HOST
cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost);
// Verify results
int err_cnt = 0;
for (int i=0; i<NUM_ELEMENTS; i++){
if (h_out[i] != 7){
printf("h_out[%d] == %d != 7\n", i, h_out[i]);
err_cnt++;
break;
}
}
if (err_cnt!=0){
printf("Wrong result\n");
}else{
printf("Success\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
dcf71e6788dd70c11e78991008dfe59ca464e358.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/cos_sim_functor.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x,
const T* y, const T* z, const T* dz,
const size_t rows, const size_t cols, T* dy) {
int grid_size = blockDim.x * gridDim.x;
T y_norm_data = y_norm[0];
for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows;
row_id += grid_size) {
T xy_norm_prod = x_norm[row_id] * y_norm_data;
T dz_data = dz[row_id];
T z_data = z[row_id];
const T* x_data = x + cols * row_id;
T reciprocal_xy_norm_prod = 1 / xy_norm_prod;
T y_norm_square = y_norm_data * y_norm_data;
T reciprocal_y_norm_square = 1 / y_norm_square;
for (size_t i = 0; i < cols; ++i) {
T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod -
z_data * y[i] * reciprocal_y_norm_square);
platform::CudaAtomicAdd(dy + i, dy_data);
}
}
}
template <typename T>
struct CosSimDyFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* x_norm,
const T* y_norm, const T* x, const T* y, const T* z,
const T* dz, const size_t rows, const size_t cols,
T* dy) const {
const int block_size = 512;
dim3 threads(block_size, 1);
dim3 grid((rows + block_size - 1) / block_size, 1);
hipLaunchKernelGGL(( CosSimDyKernel<T>), dim3(grid), dim3(threads), 0, ctx.stream(),
x_norm, y_norm, x, y, z, dz, rows, cols, dy);
}
};
template struct CosSimDyFunctor<platform::CUDADeviceContext, float>;
template struct CosSimDyFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| dcf71e6788dd70c11e78991008dfe59ca464e358.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/cos_sim_functor.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x,
const T* y, const T* z, const T* dz,
const size_t rows, const size_t cols, T* dy) {
int grid_size = blockDim.x * gridDim.x;
T y_norm_data = y_norm[0];
for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows;
row_id += grid_size) {
T xy_norm_prod = x_norm[row_id] * y_norm_data;
T dz_data = dz[row_id];
T z_data = z[row_id];
const T* x_data = x + cols * row_id;
T reciprocal_xy_norm_prod = 1 / xy_norm_prod;
T y_norm_square = y_norm_data * y_norm_data;
T reciprocal_y_norm_square = 1 / y_norm_square;
for (size_t i = 0; i < cols; ++i) {
T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod -
z_data * y[i] * reciprocal_y_norm_square);
platform::CudaAtomicAdd(dy + i, dy_data);
}
}
}
template <typename T>
struct CosSimDyFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* x_norm,
const T* y_norm, const T* x, const T* y, const T* z,
const T* dz, const size_t rows, const size_t cols,
T* dy) const {
const int block_size = 512;
dim3 threads(block_size, 1);
dim3 grid((rows + block_size - 1) / block_size, 1);
CosSimDyKernel<T><<<grid, threads, 0, ctx.stream()>>>(
x_norm, y_norm, x, y, z, dz, rows, cols, dy);
}
};
template struct CosSimDyFunctor<platform::CUDADeviceContext, float>;
template struct CosSimDyFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
5e005e9c9d53bcbead03938d625777bf794eae8a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/for_each.h>
#include <exception>
#include <sstream>
#include "nvstrings/NVStrings.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
#include "./NVStringsImpl.h"
// Like the other regex functors, this one has two modes: size/count calculation
// and then the operation itself (findall). This minimizes the inlining of
// the regex code while not causing divergence. Makes the code a bit messy
// but build times are reduced by half since only one regex find() is inlined.
// This column version is less intense than its record counterpart.
template <size_t stack_size>
struct findall_fn {
dreprog* prog;
custring_view_array d_strings;
int* d_counts;
bool bcompute_size_only{true};
int column;
thrust::pair<const char*, size_t>* d_indexes;
//
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if (!dstr) return;
if (!bcompute_size_only && (column >= d_counts[idx])) return;
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1, data2);
if (!bcompute_size_only) {
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
int spos = 0, nchars = (int)dstr->chars_count();
int epos = nchars, column_count = 0;
// prog->find(idx,dstr,spos,epos);
// for( int col=0; col <= column; ++c )
while (spos <= nchars) {
if (prog->find(idx, dstr, spos, epos) <= 0) break;
if (!bcompute_size_only && (column_count == column)) break;
spos = epos > spos ? epos : spos + 1;
epos = nchars;
++column_count;
// prog->find(idx,dstr,spos,epos);
}
if (bcompute_size_only)
d_counts[idx] = column_count;
else {
// this will be the string for this column
if (spos < epos) {
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos - spos);
} else { // create empty string instead of a null one
d_indexes[idx].first = dstr->data();
}
}
}
};
// same as findall but strings are returned organized in column-major
int NVStrings::findall(const char* pattern, std::vector<NVStrings*>& results)
{
if (pattern == 0) return -1;
unsigned int count = size();
if (count == 0) return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32, get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if (regex_insts > MAX_STACK_INSTS) {
if (!prog->alloc_relists(count)) {
std::ostringstream message;
message << "nvstrings::findall: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> counts(count, 0);
int* d_counts = counts.data().get();
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10))
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts});
else if (regex_insts <= 100)
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts});
else
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts});
int columns = *thrust::max_element(execpol->on(0), counts.begin(), counts.end());
// boundary case: if no columns, return one null column (issue #119)
if (columns == 0) results.push_back(new NVStrings(count));
// create columns of nvstrings
for (int col_idx = 0; col_idx < columns; ++col_idx) {
// build index for each string -- collect pointers and lengths
rmm::device_vector<thrust::pair<const char*, size_t>> indexes(count);
thrust::pair<const char*, size_t>* d_indexes = indexes.data().get();
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10))
thrust::for_each_n(
execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else if (regex_insts <= 100)
thrust::for_each_n(
execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else
thrust::for_each_n(
execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, false, col_idx, d_indexes});
NVStrings* column =
NVStrings::create_from_index((std::pair<const char*, size_t>*)d_indexes, count);
results.push_back(column);
}
dreprog::destroy(prog);
return (unsigned int)results.size();
}
| 5e005e9c9d53bcbead03938d625777bf794eae8a.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/for_each.h>
#include <exception>
#include <sstream>
#include "nvstrings/NVStrings.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
#include "./NVStringsImpl.h"
// Like the other regex functors, this one has two modes: size/count calculation
// and then the operation itself (findall). This minimizes the inlining of
// the regex code while not causing divergence. Makes the code a bit messy
// but build times are reduced by half since only one regex find() is inlined.
// This column version is less intense than its record counterpart.
template <size_t stack_size>
struct findall_fn {
dreprog* prog;
custring_view_array d_strings;
int* d_counts;
bool bcompute_size_only{true};
int column;
thrust::pair<const char*, size_t>* d_indexes;
//
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if (!dstr) return;
if (!bcompute_size_only && (column >= d_counts[idx])) return;
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1, data2);
if (!bcompute_size_only) {
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
int spos = 0, nchars = (int)dstr->chars_count();
int epos = nchars, column_count = 0;
// prog->find(idx,dstr,spos,epos);
// for( int col=0; col <= column; ++c )
while (spos <= nchars) {
if (prog->find(idx, dstr, spos, epos) <= 0) break;
if (!bcompute_size_only && (column_count == column)) break;
spos = epos > spos ? epos : spos + 1;
epos = nchars;
++column_count;
// prog->find(idx,dstr,spos,epos);
}
if (bcompute_size_only)
d_counts[idx] = column_count;
else {
// this will be the string for this column
if (spos < epos) {
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos - spos);
} else { // create empty string instead of a null one
d_indexes[idx].first = dstr->data();
}
}
}
};
// same as findall but strings are returned organized in column-major
int NVStrings::findall(const char* pattern, std::vector<NVStrings*>& results)
{
if (pattern == 0) return -1;
unsigned int count = size();
if (count == 0) return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32, get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if (regex_insts > MAX_STACK_INSTS) {
if (!prog->alloc_relists(count)) {
std::ostringstream message;
message << "nvstrings::findall: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> counts(count, 0);
int* d_counts = counts.data().get();
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10))
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts});
else if (regex_insts <= 100)
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts});
else
thrust::for_each_n(execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts});
int columns = *thrust::max_element(execpol->on(0), counts.begin(), counts.end());
// boundary case: if no columns, return one null column (issue #119)
if (columns == 0) results.push_back(new NVStrings(count));
// create columns of nvstrings
for (int col_idx = 0; col_idx < columns; ++col_idx) {
// build index for each string -- collect pointers and lengths
rmm::device_vector<thrust::pair<const char*, size_t>> indexes(count);
thrust::pair<const char*, size_t>* d_indexes = indexes.data().get();
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10))
thrust::for_each_n(
execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else if (regex_insts <= 100)
thrust::for_each_n(
execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else
thrust::for_each_n(
execpol->on(0),
thrust::make_counting_iterator<unsigned int>(0),
count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, false, col_idx, d_indexes});
NVStrings* column =
NVStrings::create_from_index((std::pair<const char*, size_t>*)d_indexes, count);
results.push_back(column);
}
dreprog::destroy(prog);
return (unsigned int)results.size();
}
|
a0ff67a5b6e9b605df6cffcca444eac44410b332.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include <device_launch_parameters.h>
#include <cstdio>
#include "utils.h"
__device__ int __d_min(int a, int b) {
return a < b ? a : b;
}
__device__ int __d_max(int a, int b) {
return a > b ? a : b;
}
__global__
void gaussian_blur(const unsigned char *const inputChannel,
unsigned char *const outputChannel,
int numRows, int numCols,
const float *const filter, const int filterWidth) {
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int absolute_image_position_x = blockIdx.x * blockDim.x + threadIdx.x,
absolute_image_position_y = blockIdx.y * blockDim.y + threadIdx.y;
if (absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows) {
return;
}
int w = filterWidth / 2;
float out = 0;
for (int row = absolute_image_position_y - w; row <= absolute_image_position_y + w; ++row) {
// clamp to image boundary, but NOT just throw is away
int clamp_row = __d_min(__d_max(row, 0), numRows - 1);
for (int col = absolute_image_position_x - w; col <= absolute_image_position_x + w; ++col) {
// clamp to image boundary, but NOT just throw is away
int clamp_col = __d_min(__d_max(col, 0), numCols - 1);
// calc conv
int filter_row = row - (absolute_image_position_y - w),
filter_col = col - (absolute_image_position_x - w);
out += inputChannel[clamp_row * numCols + clamp_col] * filter[filter_row * filterWidth + filter_col];
}
}
// write to output channels
outputChannel[absolute_image_position_y * numCols + absolute_image_position_x] = (char) out;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4 *const inputImageRGBA,
int numRows,
int numCols,
unsigned char *const redChannel,
unsigned char *const greenChannel,
unsigned char *const blueChannel) {
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
int absolute_image_position_x = blockIdx.x * blockDim.x + threadIdx.x,
absolute_image_position_y = blockIdx.y * blockDim.y + threadIdx.y;
if (absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows) {
return;
} else {
int tid = absolute_image_position_y * numCols + absolute_image_position_x;
redChannel[tid] = inputImageRGBA[tid].x;
greenChannel[tid] = inputImageRGBA[tid].y;
blueChannel[tid] = inputImageRGBA[tid].z;
}
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char *const redChannel,
const unsigned char *const greenChannel,
const unsigned char *const blueChannel,
uchar4 *const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float *const h_filter, const size_t filterWidth) {
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
// Full blur pipeline: split the interleaved RGBA image into planar channels,
// blur each channel with the device-resident filter, then recombine into
// d_outputImageRGBA. Assumes allocateMemoryAndCopyToGPU has populated
// d_red/d_green/d_blue and d_filter.
void your_gaussian_blur(const uchar4 *const h_inputImageRGBA, uchar4 *const d_inputImageRGBA,
                        uchar4 *const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth) {
    // 16x16 = 256 threads per block: a warp-size multiple and a good default
    // for 2D image kernels.
    const int THREAD_SIZE = 16;
    const dim3 blockSize(THREAD_SIZE, THREAD_SIZE, 1);
    // Ceiling division covers the image exactly; the original "size / 16 + 1"
    // launched a redundant row/column of blocks whenever the image dimension
    // was already a multiple of 16.
    const dim3 gridSize((numCols + THREAD_SIZE - 1) / THREAD_SIZE,
                        (numRows + THREAD_SIZE - 1) / THREAD_SIZE, 1);
    // Separate the RGBA image into three planar color channels.
    separateChannels << < gridSize, blockSize >> > (d_inputImageRGBA,
                                                    numRows, numCols,
                                                    d_red, d_green, d_blue);
    // Synchronize and check so launch/execution errors surface here rather
    // than at some later unrelated call; the sync's own return code is now
    // checked too.
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hipGetLastError());
    // One convolution per color channel.
    gaussian_blur << < gridSize, blockSize >> > (d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
    gaussian_blur << < gridSize, blockSize >> > (d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
    gaussian_blur << < gridSize, blockSize >> > (d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hipGetLastError());
    // Recombine the blurred planes into the interleaved output image.
    recombineChannels << < gridSize, blockSize >> > (d_redBlurred,
                                                     d_greenBlurred,
                                                     d_blueBlurred,
                                                     d_outputImageRGBA,
                                                     numRows,
                                                     numCols);
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hipGetLastError());
}
// Free every device allocation made in allocateMemoryAndCopyToGPU.
void cleanup() {
    checkCudaErrors(hipFree(d_red));
    checkCudaErrors(hipFree(d_green));
    checkCudaErrors(hipFree(d_blue));
    // d_filter is allocated in allocateMemoryAndCopyToGPU as well; the
    // original version leaked it.
    checkCudaErrors(hipFree(d_filter));
}
| a0ff67a5b6e9b605df6cffcca444eac44410b332.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include <device_launch_parameters.h>
#include <cstdio>
#include "utils.h"
// Device-side minimum of two ints (avoids pulling host std::min into
// device code).
__device__ int __d_min(int a, int b) {
    if (a < b) {
        return a;
    }
    return b;
}
// Device-side maximum of two ints, counterpart to __d_min.
__device__ int __d_max(int a, int b) {
    if (a > b) {
        return a;
    }
    return b;
}
// Blurs one color channel. One thread per output pixel: computes the
// filterWidth x filterWidth weighted sum of its neighborhood, clamping
// out-of-image neighbor coordinates to the nearest edge pixel (replicate
// padding), matching the sequential reference solution's semantics.
__global__
void gaussian_blur(const unsigned char *const inputChannel,
                   unsigned char *const outputChannel,
                   int numRows, int numCols,
                   const float *const filter, const int filterWidth) {
    int absolute_image_position_x = blockIdx.x * blockDim.x + threadIdx.x,
        absolute_image_position_y = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads in the partial blocks past the right/bottom edges must not
    // touch memory at all.
    if (absolute_image_position_x >= numCols ||
        absolute_image_position_y >= numRows) {
        return;
    }
    int w = filterWidth / 2;  // filter radius
    // Accumulate in floating point before the final narrowing store.
    float out = 0;
    for (int row = absolute_image_position_y - w; row <= absolute_image_position_y + w; ++row) {
        // Clamp to the image boundary rather than skipping the sample.
        int clamp_row = __d_min(__d_max(row, 0), numRows - 1);
        for (int col = absolute_image_position_x - w; col <= absolute_image_position_x + w; ++col) {
            int clamp_col = __d_min(__d_max(col, 0), numCols - 1);
            // Filter coordinates are the (unclamped) offsets within the window.
            int filter_row = row - (absolute_image_position_y - w),
                filter_col = col - (absolute_image_position_x - w);
            out += inputChannel[clamp_row * numCols + clamp_col] * filter[filter_row * filterWidth + filter_col];
        }
    }
    // Store as unsigned char: the original cast to (signed) char, whose
    // result is implementation-defined for sums above 127.
    outputChannel[absolute_image_position_y * numCols + absolute_image_position_x] = (unsigned char) out;
}
// Converts the interleaved RGBA image (Array of Structures) into three
// planar channel arrays (Structure of Arrays); the alpha channel is dropped.
__global__
void separateChannels(const uchar4 *const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char *const redChannel,
                      unsigned char *const greenChannel,
                      unsigned char *const blueChannel) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard the partial blocks along the image border.
    if (col >= numCols || row >= numRows) {
        return;
    }
    const int idx = row * numCols + col;
    const uchar4 pixel = inputImageRGBA[idx];
    redChannel[idx] = pixel.x;
    greenChannel[idx] = pixel.y;
    blueChannel[idx] = pixel.z;
}
// Merges three planar channel arrays back into one interleaved RGBA image.
// Alpha is forced to 255 (fully opaque) since transparency is unused.
__global__
void recombineChannels(const unsigned char *const redChannel,
                       const unsigned char *const greenChannel,
                       const unsigned char *const blueChannel,
                       uchar4 *const outputImageRGBA,
                       int numRows,
                       int numCols) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads mapped outside the image return early.
    if (x >= numCols || y >= numRows)
        return;
    const int idx = y * numCols + x;
    outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                       greenChannel[idx],
                                       blueChannel[idx],
                                       255);
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates the per-channel device buffers and the device-side filter, and
// uploads the host filter weights. Pairs with cleanup(), which releases the
// allocations.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float *const h_filter, const size_t filterWidth) {
    // One byte per pixel per channel.
    const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
    checkCudaErrors(cudaMalloc(&d_red, channelBytes));
    checkCudaErrors(cudaMalloc(&d_green, channelBytes));
    checkCudaErrors(cudaMalloc(&d_blue, channelBytes));
    // The filter is a square filterWidth x filterWidth array of float weights.
    const size_t filterBytes = sizeof(float) * filterWidth * filterWidth;
    checkCudaErrors(cudaMalloc(&d_filter, filterBytes));
    checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterBytes, cudaMemcpyHostToDevice));
}
// Full blur pipeline: split the interleaved RGBA image into planar channels,
// blur each channel with the device-resident filter, then recombine into
// d_outputImageRGBA. Assumes allocateMemoryAndCopyToGPU has populated
// d_red/d_green/d_blue and d_filter.
void your_gaussian_blur(const uchar4 *const h_inputImageRGBA, uchar4 *const d_inputImageRGBA,
                        uchar4 *const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth) {
    // 16x16 = 256 threads per block: a warp-size multiple and a good default
    // for 2D image kernels.
    const int THREAD_SIZE = 16;
    const dim3 blockSize(THREAD_SIZE, THREAD_SIZE, 1);
    // Ceiling division covers the image exactly; the original "size / 16 + 1"
    // launched a redundant row/column of blocks whenever the image dimension
    // was already a multiple of 16.
    const dim3 gridSize((numCols + THREAD_SIZE - 1) / THREAD_SIZE,
                        (numRows + THREAD_SIZE - 1) / THREAD_SIZE, 1);
    // Separate the RGBA image into three planar color channels.
    separateChannels << < gridSize, blockSize >> > (d_inputImageRGBA,
                                                    numRows, numCols,
                                                    d_red, d_green, d_blue);
    // Synchronize and check so launch/execution errors surface here rather
    // than at some later unrelated call; the sync's own return code is now
    // checked too.
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaGetLastError());
    // One convolution per color channel.
    gaussian_blur << < gridSize, blockSize >> > (d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
    gaussian_blur << < gridSize, blockSize >> > (d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
    gaussian_blur << < gridSize, blockSize >> > (d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaGetLastError());
    // Recombine the blurred planes into the interleaved output image.
    recombineChannels << < gridSize, blockSize >> > (d_redBlurred,
                                                     d_greenBlurred,
                                                     d_blueBlurred,
                                                     d_outputImageRGBA,
                                                     numRows,
                                                     numCols);
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaGetLastError());
}
// Free every device allocation made in allocateMemoryAndCopyToGPU.
void cleanup() {
    checkCudaErrors(cudaFree(d_red));
    checkCudaErrors(cudaFree(d_green));
    checkCudaErrors(cudaFree(d_blue));
    // d_filter is allocated in allocateMemoryAndCopyToGPU as well; the
    // original version leaked it.
    checkCudaErrors(cudaFree(d_filter));
}
|
173f4118f4d25f1f77e74ee968807b2f010b1566.hip | // !!! This is a file automatically generated by hipify!!!
//
// Average extreme spread of five-shot group assuming impact coordinates follow standard normal distribution
//
// Building:
// nvcc -std=c++11 es_cuda.cu -o es_cuda -lcurand
//
// Running:
// for run in {1..10}; do ./es_cuda 15 | tee -a es_cuda.csv; done
//
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
#include <hiprand/hiprand.h>
namespace cg = cooperative_groups;
using std::string;
using std::vector;
// First level of reduction: block-wide sum. Every thread contributes `in`;
// after the tree reduction, sdata[0] holds the block total and is returned
// to all threads. Requires blockDim.x to be a power of two (the host
// launches 128-thread blocks) and blockDim.x * sizeof(double) bytes of
// dynamic shared memory supplied at kernel launch.
__device__ double reduce_sum(double in, cg::thread_block cta)
{
extern __shared__ double sdata[];
// Write to shared memory
unsigned ltid = threadIdx.x;
sdata[ltid] = in;
cg::sync(cta);
// Do reduction in shared memory: halve the active range each pass.
for (unsigned s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
if (ltid < s) {
sdata[ltid] += sdata[ltid + s];
}
// Barrier outside the if: every thread in the block must reach it.
cg::sync(cta);
}
return sdata[0];
}
// Estimator kernel: accumulates the extreme spread (largest pairwise
// distance) of simulated five-shot groups. Each block writes one partial
// sum to results[blockIdx.x]; the host finishes the reduction.
// `points` holds 10 * numGroups standard-normal doubles; the first
// 5 * numGroups entries are used as X coordinates, the rest as Y.
// Requires blockDim.x * sizeof(double) dynamic shared memory (reduce_sum).
__global__ void computeValue(double* const results,
const double* const points,
const unsigned numGroups)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
// Determine thread ID
unsigned bid = blockIdx.x;
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned step = gridDim.x * blockDim.x;
// Shift the input/output pointers.
// NOTE(review): pointx advances by step * 5 per group while adjacent tids
// start only 1 apart, so neighboring threads read overlapping 5-sample
// windows rather than disjoint groups; each window is still 5 i.i.d.
// normals, but confirm this layout matches the generator's intent.
const double* pointx = points + tid;
const double* pointy = pointx + 5 * numGroups;
// Sum up extreme spread of all groups handled by this thread
double sum = 0;
for (unsigned i = tid ; i < numGroups; i += step, pointx += step * 5, pointy += step * 5) {
// Pairwise distances: C(5,2) = 10 point pairs
double dx[10], dy[10];
// Unroll nested comparison loops
dx[0] = pointx[0] - pointx[1]; dy[0] = pointy[0] - pointy[1];
dx[1] = pointx[0] - pointx[2]; dy[1] = pointy[0] - pointy[2];
dx[2] = pointx[0] - pointx[3]; dy[2] = pointy[0] - pointy[3];
dx[3] = pointx[0] - pointx[4]; dy[3] = pointy[0] - pointy[4];
dx[4] = pointx[1] - pointx[2]; dy[4] = pointy[1] - pointy[2];
dx[5] = pointx[1] - pointx[3]; dy[5] = pointy[1] - pointy[3];
dx[6] = pointx[1] - pointx[4]; dy[6] = pointy[1] - pointy[4];
dx[7] = pointx[2] - pointx[3]; dy[7] = pointy[2] - pointy[3];
dx[8] = pointx[2] - pointx[4]; dy[8] = pointy[2] - pointy[4];
dx[9] = pointx[3] - pointx[4]; dy[9] = pointy[3] - pointy[4];
// Largest squared pairwise distance; sqrt deferred until after the max
double max_d2 = 0;
for (unsigned j = 0; j < 10; j++) {
auto candidate_d2 = dx[j] * dx[j] + dy[j] * dy[j];
max_d2 = max(max_d2, candidate_d2);
}
double es = sqrt(max_d2);
sum += es;
}
// Reduce within the block
sum = reduce_sum(sum, cta);
// Store the per-block partial result
if (threadIdx.x == 0) {
results[bid] = sum;
}
}
// Runs one GPU pass: simulates 4^power_of_4 five-shot groups whose impact
// coordinates are i.i.d. standard normal and returns the mean extreme
// spread (largest pairwise distance within a group).
// `seed` seeds the device RNG, making each pass reproducible.
// Throws std::runtime_error on any HIP or hipRAND failure.
double es_cuda(unsigned power_of_4, unsigned seed)
{
    // Get device properties
    struct hipDeviceProp_t deviceProperties;
    hipError_t cudaResult = hipGetDeviceProperties(&deviceProperties, 0);
    if (cudaResult != hipSuccess) {
        string msg("Could not get device properties: ");
        msg += hipGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Double precision requires compute capability 1.3 or higher
    if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
        throw std::runtime_error("Device does not have double precision support");
    }
    // Block size: a power of two, as required by reduce_sum's tree reduction
    const unsigned threadBlockSize = 128;
    if (threadBlockSize > (deviceProperties.maxThreadsPerBlock)) {
        throw std::runtime_error("Thread block size is greater than maxThreadsPerBlock");
    }
    dim3 block;
    block.x = threadBlockSize;
    // Attach to GPU
    cudaResult = hipSetDevice(0);
    if (cudaResult != hipSuccess) {
        string msg("Could not set CUDA device: ");
        msg += hipGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Aim to launch around ten or more times as many blocks as there
    // are multiprocessors on the target device.
    dim3 grid;
    const unsigned numGroups = 1 << (2 * power_of_4);
    grid.x = numGroups / threadBlockSize;
    if (grid.x == 0) {
        grid.x = 1;  // numGroups < block size would otherwise launch a zero-block grid
    }
    while (grid.x > 20 * deviceProperties.multiProcessorCount) {
        grid.x >>= 1;
    }
    // Get computeValue function properties and check the maximum block size
    struct hipFuncAttributes funcAttributes;
    cudaResult = hipFuncGetAttributes(&funcAttributes, computeValue);
    if (cudaResult != hipSuccess) {
        string msg("Could not get function attributes: ");
        msg += hipGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    if (block.x > (unsigned)funcAttributes.maxThreadsPerBlock) {
        throw std::runtime_error("Block X dimension is too large for computeValue kernel");
    }
    // Check the dimensions are valid for the device
    if (block.x > (unsigned)deviceProperties.maxThreadsDim[0]) {
        throw std::runtime_error("Block X dimension is too large for device");
    }
    if (grid.x > (unsigned)deviceProperties.maxGridSize[0]) {
        throw std::runtime_error("Grid X dimension is too large for device");
    }
    // Allocate memory for points: ten random numbers per simulation
    // (five (X, Y) coordinate pairs).
    // NOTE(review): d_points/d_results leak if a later call throws; consider
    // RAII wrappers if these paths can fail in production.
    double* d_points = 0;
    cudaResult = hipMalloc((void **)&d_points, 10 * numGroups * sizeof(double));
    if (cudaResult != hipSuccess) {
        string msg("Could not allocate memory on device for random numbers: ");
        msg += hipGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Allocate memory for results: one partial sum per thread block
    double* d_results = 0;
    cudaResult = hipMalloc((void**)&d_results, grid.x * sizeof(double));
    if (cudaResult != hipSuccess) {
        string msg("Could not allocate memory on device for partial results: ");
        msg += hipGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Generate random points
    hiprandStatus_t curandResult;
    hiprandGenerator_t prng;
    curandResult = hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
    if (curandResult != HIPRAND_STATUS_SUCCESS) {
        string msg("Could not create pseudo-random number generator: ");
        // Append the numeric status code; the original "msg += curandResult"
        // appended the enum value as a single garbage character.
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = hiprandSetPseudoRandomGeneratorSeed(prng, seed);
    if (curandResult != HIPRAND_STATUS_SUCCESS) {
        string msg("Could not set seed for pseudo-random number generator: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = hiprandGenerateNormalDouble(prng, (double*)d_points, 10 * numGroups, 0, 1);
    if (curandResult != HIPRAND_STATUS_SUCCESS) {
        string msg("Could not generate pseudo-random numbers: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = hiprandDestroyGenerator(prng);
    if (curandResult != HIPRAND_STATUS_SUCCESS) {
        string msg("Could not destroy pseudo-random number generator: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    // Sum per-block extreme spreads; reduce_sum needs block.x doubles of
    // dynamic shared memory.
    hipLaunchKernelGGL(( computeValue), dim3(grid), dim3(block), block.x * sizeof(double), 0, d_results, d_points, numGroups);
    // Copy the per-block partial sums back to the host
    vector<double> results(grid.x);
    cudaResult = hipMemcpy(&results[0], d_results, grid.x * sizeof(double), hipMemcpyDeviceToHost);
    if (cudaResult != hipSuccess) {
        string msg("Could not copy results to host: ");
        msg += hipGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Complete the sum-reduction on the host
    double sum = std::accumulate(results.begin(), results.end(), double(0));
    // Cleanup
    if (d_points) {
        hipFree(d_points);
    }
    if (d_results) {
        hipFree(d_results);
    }
    // Average extreme spread over all simulated groups
    return sum / numGroups;
}
// Entry point. Optional argv[1] selects the problem size as a power of 4.
// Sizes above 4^12 are split into proportionally more GPU passes of 4^12
// groups each so device memory stays bounded; min/avg/max over the passes
// and the wall-clock time are printed as one CSV row.
int main(int argc, char **argv)
{
unsigned power_of_4 = 12;
if (argc == 2) {
power_of_4 = atoi(argv[1]);
}
// Number of GPU passes (threads of work, reported in the CSV as "threads")
unsigned nt = 12;
// Cap a single pass at 4^12 groups; run 4x more passes per extra power.
if (power_of_4 > 12) {
nt <<= 2 * (power_of_4 - 12);
power_of_4 = 12;
}
try {
auto start_time = std::chrono::system_clock::now();
// min starts at 100 — assumes extreme spread is always below 100; the
// expected value for 5-shot standard-normal groups is a few units, but
// TODO confirm the bound.
double avg = 0, min = 100, max = 0;
// 128-bit multiplicative congruential generator; the high 64 bits
// (truncated to 32) seed each GPU pass.
__uint128_t mcg128_state = time(NULL) | 1; // can be seeded to any odd number
for (unsigned j = 0; j < nt; j++) {
double r = es_cuda(power_of_4, (unsigned)(mcg128_state >> 64));
avg += r;
min = fmin(r, min);
max = fmax(r, max);
mcg128_state *= 0xda942042e4dd58b5ULL;
}
avg /= nt;
auto end_time = std::chrono::system_clock::now();
std::chrono::duration<double> seconds = end_time - start_time;
std::cout.precision(14);
std::cout << "code,threads,power_of_4,min,avg,max,time\n";
std::cout << "CUDA," << nt << "," << power_of_4 << "," << min << "," << avg << "," << max << "," << seconds.count() << "\n";
} catch (std::runtime_error &e) { // es_cuda() can throw runtime exceptions
fprintf(stderr, "runtime error (%s)\n", e.what());
return(EXIT_FAILURE);
}
return(EXIT_SUCCESS);
}
| 173f4118f4d25f1f77e74ee968807b2f010b1566.cu | //
// Average extreme spread of five-shot group assuming impact coordinates follow standard normal distribution
//
// Building:
// nvcc -std=c++11 es_cuda.cu -o es_cuda -lcurand
//
// Running:
// for run in {1..10}; do ./es_cuda 15 | tee -a es_cuda.csv; done
//
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <cooperative_groups.h>
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <cuda_runtime.h>
#include <math.h>
#include <chrono>
#include <curand.h>
namespace cg = cooperative_groups;
using std::string;
using std::vector;
// First level of reduction: block-wide sum. Every thread contributes `in`;
// after the tree reduction, sdata[0] holds the block total and is returned
// to all threads. Requires blockDim.x to be a power of two (the host
// launches 128-thread blocks) and blockDim.x * sizeof(double) bytes of
// dynamic shared memory supplied at kernel launch.
__device__ double reduce_sum(double in, cg::thread_block cta)
{
extern __shared__ double sdata[];
// Write to shared memory
unsigned ltid = threadIdx.x;
sdata[ltid] = in;
cg::sync(cta);
// Do reduction in shared memory: halve the active range each pass.
for (unsigned s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
if (ltid < s) {
sdata[ltid] += sdata[ltid + s];
}
// Barrier outside the if: every thread in the block must reach it.
cg::sync(cta);
}
return sdata[0];
}
// Estimator kernel: accumulates the extreme spread (largest pairwise
// distance) of simulated five-shot groups. Each block writes one partial
// sum to results[blockIdx.x]; the host finishes the reduction.
// `points` holds 10 * numGroups standard-normal doubles; the first
// 5 * numGroups entries are used as X coordinates, the rest as Y.
// Requires blockDim.x * sizeof(double) dynamic shared memory (reduce_sum).
__global__ void computeValue(double* const results,
const double* const points,
const unsigned numGroups)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
// Determine thread ID
unsigned bid = blockIdx.x;
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned step = gridDim.x * blockDim.x;
// Shift the input/output pointers.
// NOTE(review): pointx advances by step * 5 per group while adjacent tids
// start only 1 apart, so neighboring threads read overlapping 5-sample
// windows rather than disjoint groups; each window is still 5 i.i.d.
// normals, but confirm this layout matches the generator's intent.
const double* pointx = points + tid;
const double* pointy = pointx + 5 * numGroups;
// Sum up extreme spread of all groups handled by this thread
double sum = 0;
for (unsigned i = tid ; i < numGroups; i += step, pointx += step * 5, pointy += step * 5) {
// Pairwise distances: C(5,2) = 10 point pairs
double dx[10], dy[10];
// Unroll nested comparison loops
dx[0] = pointx[0] - pointx[1]; dy[0] = pointy[0] - pointy[1];
dx[1] = pointx[0] - pointx[2]; dy[1] = pointy[0] - pointy[2];
dx[2] = pointx[0] - pointx[3]; dy[2] = pointy[0] - pointy[3];
dx[3] = pointx[0] - pointx[4]; dy[3] = pointy[0] - pointy[4];
dx[4] = pointx[1] - pointx[2]; dy[4] = pointy[1] - pointy[2];
dx[5] = pointx[1] - pointx[3]; dy[5] = pointy[1] - pointy[3];
dx[6] = pointx[1] - pointx[4]; dy[6] = pointy[1] - pointy[4];
dx[7] = pointx[2] - pointx[3]; dy[7] = pointy[2] - pointy[3];
dx[8] = pointx[2] - pointx[4]; dy[8] = pointy[2] - pointy[4];
dx[9] = pointx[3] - pointx[4]; dy[9] = pointy[3] - pointy[4];
// Largest squared pairwise distance; sqrt deferred until after the max
double max_d2 = 0;
for (unsigned j = 0; j < 10; j++) {
auto candidate_d2 = dx[j] * dx[j] + dy[j] * dy[j];
max_d2 = max(max_d2, candidate_d2);
}
double es = sqrt(max_d2);
sum += es;
}
// Reduce within the block
sum = reduce_sum(sum, cta);
// Store the per-block partial result
if (threadIdx.x == 0) {
results[bid] = sum;
}
}
// Runs one GPU pass: simulates 4^power_of_4 five-shot groups whose impact
// coordinates are i.i.d. standard normal and returns the mean extreme
// spread (largest pairwise distance within a group).
// `seed` seeds the device RNG, making each pass reproducible.
// Throws std::runtime_error on any CUDA or cuRAND failure.
double es_cuda(unsigned power_of_4, unsigned seed)
{
    // Get device properties
    struct cudaDeviceProp deviceProperties;
    cudaError_t cudaResult = cudaGetDeviceProperties(&deviceProperties, 0);
    if (cudaResult != cudaSuccess) {
        string msg("Could not get device properties: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Double precision requires compute capability 1.3 or higher
    if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
        throw std::runtime_error("Device does not have double precision support");
    }
    // Block size: a power of two, as required by reduce_sum's tree reduction
    const unsigned threadBlockSize = 128;
    if (threadBlockSize > (deviceProperties.maxThreadsPerBlock)) {
        throw std::runtime_error("Thread block size is greater than maxThreadsPerBlock");
    }
    dim3 block;
    block.x = threadBlockSize;
    // Attach to GPU
    cudaResult = cudaSetDevice(0);
    if (cudaResult != cudaSuccess) {
        string msg("Could not set CUDA device: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Aim to launch around ten or more times as many blocks as there
    // are multiprocessors on the target device.
    dim3 grid;
    const unsigned numGroups = 1 << (2 * power_of_4);
    grid.x = numGroups / threadBlockSize;
    if (grid.x == 0) {
        grid.x = 1;  // numGroups < block size would otherwise launch a zero-block grid
    }
    while (grid.x > 20 * deviceProperties.multiProcessorCount) {
        grid.x >>= 1;
    }
    // Get computeValue function properties and check the maximum block size
    struct cudaFuncAttributes funcAttributes;
    cudaResult = cudaFuncGetAttributes(&funcAttributes, computeValue);
    if (cudaResult != cudaSuccess) {
        string msg("Could not get function attributes: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    if (block.x > (unsigned)funcAttributes.maxThreadsPerBlock) {
        throw std::runtime_error("Block X dimension is too large for computeValue kernel");
    }
    // Check the dimensions are valid for the device
    if (block.x > (unsigned)deviceProperties.maxThreadsDim[0]) {
        throw std::runtime_error("Block X dimension is too large for device");
    }
    if (grid.x > (unsigned)deviceProperties.maxGridSize[0]) {
        throw std::runtime_error("Grid X dimension is too large for device");
    }
    // Allocate memory for points: ten random numbers per simulation
    // (five (X, Y) coordinate pairs).
    // NOTE(review): d_points/d_results leak if a later call throws; consider
    // RAII wrappers if these paths can fail in production.
    double* d_points = 0;
    cudaResult = cudaMalloc((void **)&d_points, 10 * numGroups * sizeof(double));
    if (cudaResult != cudaSuccess) {
        string msg("Could not allocate memory on device for random numbers: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Allocate memory for results: one partial sum per thread block
    double* d_results = 0;
    cudaResult = cudaMalloc((void**)&d_results, grid.x * sizeof(double));
    if (cudaResult != cudaSuccess) {
        string msg("Could not allocate memory on device for partial results: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Generate random points
    curandStatus_t curandResult;
    curandGenerator_t prng;
    curandResult = curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not create pseudo-random number generator: ");
        // Append the numeric status code; the original "msg += curandResult"
        // appended the enum value as a single garbage character.
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = curandSetPseudoRandomGeneratorSeed(prng, seed);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not set seed for pseudo-random number generator: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = curandGenerateNormalDouble(prng, (double*)d_points, 10 * numGroups, 0, 1);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not generate pseudo-random numbers: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = curandDestroyGenerator(prng);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not destroy pseudo-random number generator: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    // Sum per-block extreme spreads; reduce_sum needs block.x doubles of
    // dynamic shared memory.
    computeValue<<<grid, block, block.x * sizeof(double)>>>(d_results, d_points, numGroups);
    // Copy the per-block partial sums back to the host
    vector<double> results(grid.x);
    cudaResult = cudaMemcpy(&results[0], d_results, grid.x * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaResult != cudaSuccess) {
        string msg("Could not copy results to host: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Complete the sum-reduction on the host
    double sum = std::accumulate(results.begin(), results.end(), double(0));
    // Cleanup
    if (d_points) {
        cudaFree(d_points);
    }
    if (d_results) {
        cudaFree(d_results);
    }
    // Average extreme spread over all simulated groups
    return sum / numGroups;
}
// Entry point. Optional argv[1] selects the problem size as a power of 4.
// Sizes above 4^12 are split into proportionally more GPU passes of 4^12
// groups each so device memory stays bounded; min/avg/max over the passes
// and the wall-clock time are printed as one CSV row.
int main(int argc, char **argv)
{
unsigned power_of_4 = 12;
if (argc == 2) {
power_of_4 = atoi(argv[1]);
}
// Number of GPU passes (threads of work, reported in the CSV as "threads")
unsigned nt = 12;
// Cap a single pass at 4^12 groups; run 4x more passes per extra power.
if (power_of_4 > 12) {
nt <<= 2 * (power_of_4 - 12);
power_of_4 = 12;
}
try {
auto start_time = std::chrono::system_clock::now();
// min starts at 100 — assumes extreme spread is always below 100; the
// expected value for 5-shot standard-normal groups is a few units, but
// TODO confirm the bound.
double avg = 0, min = 100, max = 0;
// 128-bit multiplicative congruential generator; the high 64 bits
// (truncated to 32) seed each GPU pass.
__uint128_t mcg128_state = time(NULL) | 1; // can be seeded to any odd number
for (unsigned j = 0; j < nt; j++) {
double r = es_cuda(power_of_4, (unsigned)(mcg128_state >> 64));
avg += r;
min = fmin(r, min);
max = fmax(r, max);
mcg128_state *= 0xda942042e4dd58b5ULL;
}
avg /= nt;
auto end_time = std::chrono::system_clock::now();
std::chrono::duration<double> seconds = end_time - start_time;
std::cout.precision(14);
std::cout << "code,threads,power_of_4,min,avg,max,time\n";
std::cout << "CUDA," << nt << "," << power_of_4 << "," << min << "," << avg << "," << max << "," << seconds.count() << "\n";
} catch (std::runtime_error &e) { // es_cuda() can throw runtime exceptions
fprintf(stderr, "runtime error (%s)\n", e.what());
return(EXIT_FAILURE);
}
return(EXIT_SUCCESS);
}
|
2db06bbd910dec56d312f0465de1654e8360cc8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "check_results_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double correctvalue = 1;
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
check_results_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,correctvalue,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
check_results_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,correctvalue,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
check_results_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,correctvalue,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2db06bbd910dec56d312f0465de1654e8360cc8b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "check_results_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size (argv[1] rows of
// matrices_, at most 7) and each of the 20 block shapes, launches
// check_results_kernel 1000 times and prints the elapsed microseconds
// together with the (BLOCKX,BLOCKY) and (XSIZE,YSIZE) configuration.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char *p;
    int matrix_len = (int)strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            double correctvalue = 1;
            double *x = NULL;
            // BUG FIX: was cudaMalloc(&x, XSIZE*YSIZE) -- cudaMalloc takes a
            // byte count, so the buffer was 8x too small for n doubles.
            cudaMalloc(&x, (size_t)XSIZE * YSIZE * sizeof(double));
            // Round the grid up so every element of the matrix is covered.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // force lazy context creation before timing
            check_results_kernel<<<gridBlock, threadBlock>>>(n, correctvalue, x);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                check_results_kernel<<<gridBlock, threadBlock>>>(n, correctvalue, x);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                check_results_kernel<<<gridBlock, threadBlock>>>(n, correctvalue, x);
            }
            // BUG FIX: launches are asynchronous; without this sync the timer
            // measured launch overhead only, not kernel execution.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            cudaFree(x); // BUG FIX: was leaked on every configuration
        }
    }
}
a2821bd58b126d7b66480a5ca397e9bf0252e42d.hip | // !!! This is a file automatically generated by hipify!!!
#include "RayTracing.h"
#include "visualizeResults.h"
#include <time.h>
void DisplayMenu();
void StoreInFile(Vec3* mat_out_intersectionPoint, Vec3* mat_out_normal, double* mat_out_distance, double* mat_out_reflectivity, int* mat_out_visibility, int x, int y);
/*
Entry point: allocates host output buffers, prompts the user for the scene
objects, runs the GPU ray tracer once while timing it with HIP events,
writes the results to CSV files and renders the image.
*/
int main()
{
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	Vec3* mat_in_sensorGridData = GenerateMatrix(XMAX, YMAX);
	Vec3* mat_out_intersectionPoint = (Vec3*)malloc(XMAX*YMAX * sizeof(Vec3));
	Vec3* mat_out_normal = (Vec3*)malloc(XMAX*YMAX * sizeof(Vec3));
	Vec3* mat_out_imageData = (Vec3*)malloc(XMAX*YMAX * sizeof(Vec3));
	double* mat_out_distance = (double*)malloc(XMAX*YMAX * sizeof(double));
	double* mat_out_reflectivity = (double*)malloc(XMAX*YMAX * sizeof(double));
	int* mat_out_visibility = (int*)malloc(XMAX*YMAX * sizeof(int));
	SensorParameterInitization();
	DisplayMenu();
	float milliseconds = 0;
	hipEventRecord(start, 0);
	RayTracer(mat_in_sensorGridData,
		XMAX,
		YMAX,
		mat_out_intersectionPoint,
		mat_out_normal,
		mat_out_distance,
		mat_out_reflectivity,
		mat_out_visibility,
		mat_out_imageData);
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop); // wait for the GPU before reading the timer
	hipEventElapsedTime(&milliseconds, start, stop);
	printf("%f\n", milliseconds*0.001); // elapsed time in seconds
	StoreInFile(mat_out_intersectionPoint, mat_out_normal, mat_out_distance, mat_out_reflectivity, mat_out_visibility, XMAX, YMAX);
	VisualizeResults(mat_out_imageData, XMAX, YMAX);
	hipEventDestroy(start); // BUG FIX: events were never destroyed
	hipEventDestroy(stop);
	free(mat_in_sensorGridData);
	free(mat_out_intersectionPoint);
	free(mat_out_normal);
	free(mat_out_distance);
	free(mat_out_reflectivity);
	free(mat_out_visibility); // BUG FIX: was leaked
	free(mat_out_imageData);
	getch(); // keep the console window open
	return 0;
}
/*
Function: Display Menu to the user
Parameters:
Output:
*/
// Reads one double from stdin after printing `prompt` verbatim.
static void read_double(const char *prompt, double *val)
{
	printf("%s", prompt);
	scanf("%lf", val);
}

// Reads an RGB color (three doubles) into (r, g, b).
static void read_color(double *r, double *g, double *b)
{
	printf("Color:\n");
	read_double("Red Component: ", r);
	read_double("Green Component: ", g);
	read_double("Blue Component: ", b);
}

/*
Function: Display Menu to the user and read the scene objects (plane,
triangle, box or sphere); each object is appended to its global
*ListHolder container.
*/
void DisplayMenu()
{
	int numObjects;
	printf("Enter the number of objects to launch: ");
	// Guard against malformed/negative input (was passed straight to malloc).
	if (scanf(" %d", &numObjects) != 1 || numObjects <= 0)
		return;
	int *objects = (int *)malloc(numObjects * sizeof(int));
	for (int i = 0; i < numObjects; i++)
	{
		printf("___________________\n");
		printf("| Plane | 1 |\n");
		printf("| Triangle | 2 |\n");
		printf("| Box | 3 |\n");
		printf("| Sphere | 4 |\n");
		printf("|___________|_____|\n");
		printf("Enter your choice: %d\n", i + 1);
		scanf("%d", &objects[i]);
		printf("\n");
		if (objects[i] == 1)
		{
			Plane plane;
			read_double("A: ", &plane.A);
			read_double("B: ", &plane.B);
			read_double("C: ", &plane.C);
			read_double("D: ", &plane.D);
			read_double("Reflectivity:", &plane.reflectivity);
			read_color(&plane.color.x, &plane.color.y, &plane.color.z);
			planeListHolder.push_back(plane);
		}
		else if (objects[i] == 2)
		{
			Triangle triangle;
			read_double("Vertex 1: X: ", &triangle.P1.x);
			read_double("Vertex 1: Y: ", &triangle.P1.y);
			read_double("Vertex 1: Z: ", &triangle.P1.z);
			read_double("Vertex 2: X: ", &triangle.P2.x);
			read_double("Vertex 2: Y: ", &triangle.P2.y);
			read_double("Vertex 2: Z: ", &triangle.P2.z);
			read_double("Vertex 3: X: ", &triangle.P3.x);
			read_double("Vertex 3: Y: ", &triangle.P3.y);
			read_double("Vertex 3: Z: ", &triangle.P3.z);
			read_double("Reflectivity:", &triangle.reflectivity);
			read_color(&triangle.color.x, &triangle.color.y, &triangle.color.z);
			triangleListHolder.push_back(triangle);
		}
		else if (objects[i] == 3)
		{
			Box box;
			read_double("MinPoint: X: ", &box.minPt.x);
			read_double("MinPoint: Y: ", &box.minPt.y);
			read_double("MinPoint: Z: ", &box.minPt.z);
			read_double("MaxPoint: X: ", &box.maxPt.x);
			read_double("MaxPoint: Y: ", &box.maxPt.y);
			read_double("MaxPoint: Z: ", &box.maxPt.z);
			read_double("Reflectivity:", &box.reflectivity);
			read_color(&box.color.x, &box.color.y, &box.color.z);
			boxListHolder.push_back(box);
		}
		else if (objects[i] == 4)
		{
			Sphere sphere;
			read_double("Radius: ", &sphere.radius);
			read_double("CenterX: ", &sphere.center.x);
			read_double("CenterY: ", &sphere.center.y);
			read_double("CenterZ: ", &sphere.center.z);
			read_double("Reflectivity:", &sphere.reflectivity);
			read_color(&sphere.color.x, &sphere.color.y, &sphere.color.z);
			sphereListHolder.push_back(sphere);
		}
	}
	free(objects); // BUG FIX: was leaked
}
/*
Function: write the ray tracer outputs of an x*y pixel grid to five CSV
files (intersection points, normals, distances, reflectivities and
visibility flags). A pixel whose intersection x-coordinate equals the
sentinel 9999.0 had no hit and is written as (-1,-1,-1) vectors and zero
scalars instead.
*/
void StoreInFile(Vec3* mat_out_intersectionPoint, Vec3* mat_out_normal, double* mat_out_distance, double* mat_out_reflectivity, int* mat_out_visibility, int x, int y)
{
	FILE* fp1 = fopen("IntersectionPoint.csv", "w");
	FILE* fp2 = fopen("Normal.csv", "w");
	FILE* fp3 = fopen("Distance.csv", "w");
	FILE* fp4 = fopen("Reflectivity.csv", "w");
	FILE* fp5 = fopen("Visibility.csv", "w");
	if (!fp1 || !fp2 || !fp3 || !fp4 || !fp5)
	{
		// BUG FIX: bail out instead of passing NULL to fprintf.
		if (fp1) fclose(fp1);
		if (fp2) fclose(fp2);
		if (fp3) fclose(fp3);
		if (fp4) fclose(fp4);
		if (fp5) fclose(fp5);
		return;
	}
	for (int row = 0; row < y; row++)
	{
		for (int col = 0; col < x; col++)
		{
			int idx = row * x + col; // row-major pixel index
			if (mat_out_intersectionPoint[idx].x != 9999.0)
			{
				//Intersection Point
				fprintf(fp1, "x: %lf y: %lf z: %lf,", mat_out_intersectionPoint[idx].x, mat_out_intersectionPoint[idx].y, mat_out_intersectionPoint[idx].z);
				fprintf(fp2, "x: %lf y: %lf z: %lf,", mat_out_normal[idx].x, mat_out_normal[idx].y, mat_out_normal[idx].z);
				fprintf(fp3, "%lf,", mat_out_distance[idx]);
				fprintf(fp4, "%lf,", mat_out_reflectivity[idx]);
				fprintf(fp5, "%d,", mat_out_visibility[idx]);
			}
			else
			{
				//No IntersectionPoint
				fprintf(fp1, "x: %lf y: %lf z: %lf,", -1.0, -1.0, -1.0);
				fprintf(fp2, "x: %lf y: %lf z: %lf,", -1.0, -1.0, -1.0);
				// BUG FIX: was fprintf(..., "%lf,", 0) -- an int passed
				// where %lf expects a double is undefined behavior.
				fprintf(fp3, "%lf,", 0.0);
				fprintf(fp4, "%lf,", 0.0);
				fprintf(fp5, "%d,", 0);
			}
		}
		fprintf(fp1, "\n");
		fprintf(fp2, "\n");
		fprintf(fp3, "\n");
		fprintf(fp4, "\n");
		fprintf(fp5, "\n");
	}
	fclose(fp1);
	fclose(fp2);
	fclose(fp3);
	fclose(fp4);
	fclose(fp5);
}
| a2821bd58b126d7b66480a5ca397e9bf0252e42d.cu | #include "RayTracing.h"
#include "visualizeResults.h"
#include <time.h>
void DisplayMenu();
void StoreInFile(Vec3* mat_out_intersectionPoint, Vec3* mat_out_normal, double* mat_out_distance, double* mat_out_reflectivity, int* mat_out_visibility, int x, int y);
/*
Entry point: allocates host output buffers, prompts the user for the scene
objects, runs the GPU ray tracer once while timing it with CUDA events,
writes the results to CSV files and renders the image.
*/
int main()
{
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	Vec3* mat_in_sensorGridData = GenerateMatrix(XMAX, YMAX);
	Vec3* mat_out_intersectionPoint = (Vec3*)malloc(XMAX*YMAX * sizeof(Vec3));
	Vec3* mat_out_normal = (Vec3*)malloc(XMAX*YMAX * sizeof(Vec3));
	Vec3* mat_out_imageData = (Vec3*)malloc(XMAX*YMAX * sizeof(Vec3));
	double* mat_out_distance = (double*)malloc(XMAX*YMAX * sizeof(double));
	double* mat_out_reflectivity = (double*)malloc(XMAX*YMAX * sizeof(double));
	int* mat_out_visibility = (int*)malloc(XMAX*YMAX * sizeof(int));
	SensorParameterInitization();
	DisplayMenu();
	float milliseconds = 0;
	cudaEventRecord(start, 0);
	RayTracer(mat_in_sensorGridData,
		XMAX,
		YMAX,
		mat_out_intersectionPoint,
		mat_out_normal,
		mat_out_distance,
		mat_out_reflectivity,
		mat_out_visibility,
		mat_out_imageData);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop); // wait for the GPU before reading the timer
	cudaEventElapsedTime(&milliseconds, start, stop);
	printf("%f\n", milliseconds*0.001); // elapsed time in seconds
	StoreInFile(mat_out_intersectionPoint, mat_out_normal, mat_out_distance, mat_out_reflectivity, mat_out_visibility, XMAX, YMAX);
	VisualizeResults(mat_out_imageData, XMAX, YMAX);
	cudaEventDestroy(start); // BUG FIX: events were never destroyed
	cudaEventDestroy(stop);
	free(mat_in_sensorGridData);
	free(mat_out_intersectionPoint);
	free(mat_out_normal);
	free(mat_out_distance);
	free(mat_out_reflectivity);
	free(mat_out_visibility); // BUG FIX: was leaked
	free(mat_out_imageData);
	getch(); // keep the console window open
	return 0;
}
/*
Function: Display Menu to the user
Parameters:
Output:
*/
// Reads one double from stdin after printing `prompt` verbatim.
static void read_double(const char *prompt, double *val)
{
	printf("%s", prompt);
	scanf("%lf", val);
}

// Reads an RGB color (three doubles) into (r, g, b).
static void read_color(double *r, double *g, double *b)
{
	printf("Color:\n");
	read_double("Red Component: ", r);
	read_double("Green Component: ", g);
	read_double("Blue Component: ", b);
}

/*
Function: Display Menu to the user and read the scene objects (plane,
triangle, box or sphere); each object is appended to its global
*ListHolder container.
*/
void DisplayMenu()
{
	int numObjects;
	printf("Enter the number of objects to launch: ");
	// Guard against malformed/negative input (was passed straight to malloc).
	if (scanf(" %d", &numObjects) != 1 || numObjects <= 0)
		return;
	int *objects = (int *)malloc(numObjects * sizeof(int));
	for (int i = 0; i < numObjects; i++)
	{
		printf("___________________\n");
		printf("| Plane | 1 |\n");
		printf("| Triangle | 2 |\n");
		printf("| Box | 3 |\n");
		printf("| Sphere | 4 |\n");
		printf("|___________|_____|\n");
		printf("Enter your choice: %d\n", i + 1);
		scanf("%d", &objects[i]);
		printf("\n");
		if (objects[i] == 1)
		{
			Plane plane;
			read_double("A: ", &plane.A);
			read_double("B: ", &plane.B);
			read_double("C: ", &plane.C);
			read_double("D: ", &plane.D);
			read_double("Reflectivity:", &plane.reflectivity);
			read_color(&plane.color.x, &plane.color.y, &plane.color.z);
			planeListHolder.push_back(plane);
		}
		else if (objects[i] == 2)
		{
			Triangle triangle;
			read_double("Vertex 1: X: ", &triangle.P1.x);
			read_double("Vertex 1: Y: ", &triangle.P1.y);
			read_double("Vertex 1: Z: ", &triangle.P1.z);
			read_double("Vertex 2: X: ", &triangle.P2.x);
			read_double("Vertex 2: Y: ", &triangle.P2.y);
			read_double("Vertex 2: Z: ", &triangle.P2.z);
			read_double("Vertex 3: X: ", &triangle.P3.x);
			read_double("Vertex 3: Y: ", &triangle.P3.y);
			read_double("Vertex 3: Z: ", &triangle.P3.z);
			read_double("Reflectivity:", &triangle.reflectivity);
			read_color(&triangle.color.x, &triangle.color.y, &triangle.color.z);
			triangleListHolder.push_back(triangle);
		}
		else if (objects[i] == 3)
		{
			Box box;
			read_double("MinPoint: X: ", &box.minPt.x);
			read_double("MinPoint: Y: ", &box.minPt.y);
			read_double("MinPoint: Z: ", &box.minPt.z);
			read_double("MaxPoint: X: ", &box.maxPt.x);
			read_double("MaxPoint: Y: ", &box.maxPt.y);
			read_double("MaxPoint: Z: ", &box.maxPt.z);
			read_double("Reflectivity:", &box.reflectivity);
			read_color(&box.color.x, &box.color.y, &box.color.z);
			boxListHolder.push_back(box);
		}
		else if (objects[i] == 4)
		{
			Sphere sphere;
			read_double("Radius: ", &sphere.radius);
			read_double("CenterX: ", &sphere.center.x);
			read_double("CenterY: ", &sphere.center.y);
			read_double("CenterZ: ", &sphere.center.z);
			read_double("Reflectivity:", &sphere.reflectivity);
			read_color(&sphere.color.x, &sphere.color.y, &sphere.color.z);
			sphereListHolder.push_back(sphere);
		}
	}
	free(objects); // BUG FIX: was leaked
}
/*
Function: write the ray tracer outputs of an x*y pixel grid to five CSV
files (intersection points, normals, distances, reflectivities and
visibility flags). A pixel whose intersection x-coordinate equals the
sentinel 9999.0 had no hit and is written as (-1,-1,-1) vectors and zero
scalars instead.
*/
void StoreInFile(Vec3* mat_out_intersectionPoint, Vec3* mat_out_normal, double* mat_out_distance, double* mat_out_reflectivity, int* mat_out_visibility, int x, int y)
{
	FILE* fp1 = fopen("IntersectionPoint.csv", "w");
	FILE* fp2 = fopen("Normal.csv", "w");
	FILE* fp3 = fopen("Distance.csv", "w");
	FILE* fp4 = fopen("Reflectivity.csv", "w");
	FILE* fp5 = fopen("Visibility.csv", "w");
	if (!fp1 || !fp2 || !fp3 || !fp4 || !fp5)
	{
		// BUG FIX: bail out instead of passing NULL to fprintf.
		if (fp1) fclose(fp1);
		if (fp2) fclose(fp2);
		if (fp3) fclose(fp3);
		if (fp4) fclose(fp4);
		if (fp5) fclose(fp5);
		return;
	}
	for (int row = 0; row < y; row++)
	{
		for (int col = 0; col < x; col++)
		{
			int idx = row * x + col; // row-major pixel index
			if (mat_out_intersectionPoint[idx].x != 9999.0)
			{
				//Intersection Point
				fprintf(fp1, "x: %lf y: %lf z: %lf,", mat_out_intersectionPoint[idx].x, mat_out_intersectionPoint[idx].y, mat_out_intersectionPoint[idx].z);
				fprintf(fp2, "x: %lf y: %lf z: %lf,", mat_out_normal[idx].x, mat_out_normal[idx].y, mat_out_normal[idx].z);
				fprintf(fp3, "%lf,", mat_out_distance[idx]);
				fprintf(fp4, "%lf,", mat_out_reflectivity[idx]);
				fprintf(fp5, "%d,", mat_out_visibility[idx]);
			}
			else
			{
				//No IntersectionPoint
				fprintf(fp1, "x: %lf y: %lf z: %lf,", -1.0, -1.0, -1.0);
				fprintf(fp2, "x: %lf y: %lf z: %lf,", -1.0, -1.0, -1.0);
				// BUG FIX: was fprintf(..., "%lf,", 0) -- an int passed
				// where %lf expects a double is undefined behavior.
				fprintf(fp3, "%lf,", 0.0);
				fprintf(fp4, "%lf,", 0.0);
				fprintf(fp5, "%d,", 0);
			}
		}
		fprintf(fp1, "\n");
		fprintf(fp2, "\n");
		fprintf(fp3, "\n");
		fprintf(fp4, "\n");
		fprintf(fp5, "\n");
	}
	fclose(fp1);
	fclose(fp2);
	fclose(fp3);
	fclose(fp4);
	fclose(fp5);
}
|
8716925ba700a8246e0faf1361120fa093c27508.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
class Bmp256 { // minimal writer for 8-bit (256-color) paletted BMP images
#pragma pack(2) // pack to 2 bytes so the struct matches the on-disk BMP header layout
struct Header { // BITMAPFILEHEADER followed by BITMAPINFOHEADER
uint16_t bfType = 0x4D42; // "BM" magic
uint32_t bfSize; // total file size in bytes (set in the constructor)
uint16_t bfReserved1 = 0;
uint16_t bfReserved2 = 0;
uint32_t bfOffBits = 54 + 256 * 4; // pixel data starts after the headers and the palette
uint32_t biSize = 40; // BITMAPINFOHEADER size
int32_t biWidth;
int32_t biHeight;
uint16_t biPlanes = 1;
uint16_t biBitCount = 8; // 8 bits per pixel (palette indices)
uint32_t biCompression = 0; // uncompressed
uint32_t biSizeImage = 0;
int32_t biXPelsPerMeter = 0;
int32_t biYPelsPerMeter = 0;
uint32_t biClrUsed = 256;
uint32_t biClrImportant = 256;
} header;
#pragma pack() // restore default packing (8)
int32_t rowSize; // bytes per pixel row; NOTE(review): set to width with no padding, so widths should be multiples of 4 (BMP rows are 4-byte aligned)
struct { // one BGRA palette entry, BMP on-disk order
uint8_t B, G, R, A;
void set(uint8_t r, uint8_t g, uint8_t b) { R = r; G = g; B = b; A = 0; } // assign an opaque RGB color
} palette[256]; // 256-color lookup table
uint8_t *buffer = NULL; // pixel buffer of palette indices, rowSize * height bytes
void calc_palette();
public:
Bmp256(int width, int height); // allocates the pixel buffer and builds the palette
~Bmp256() { delete[] buffer; } // releases the pixel buffer
int width() const { return header.biWidth; } // image width in pixels
int height() const { return header.biHeight; } // image height in pixels
uint8_t& operator()(int row, int col) { return buffer[row * rowSize + col]; } // get/set the pixel
void save(const char* file); // write the image to a BMP file
uint8_t* get_ptr() { return buffer; }; // raw pixel buffer (e.g. as a GPU copy target)
int image_size() { return header.bfSize - header.bfOffBits; }; // pixel buffer size in bytes
};
// Constructor: records the dimensions, builds the palette and allocates
// the (uninitialized) pixel buffer.
Bmp256::Bmp256(int width, int height) {
header.biWidth = width; // store the dimensions in the file header
header.biHeight = height;
rowSize = width; // one byte per pixel; NOTE(review): assumes width % 4 == 0 (BMP rows are 4-byte aligned)
int buffSize = rowSize * height; // total pixel bytes
header.bfSize = header.bfOffBits + buffSize;
calc_palette(); // initialize the 256-color palette
buffer = new uint8_t[buffSize]; // pixel buffer; contents are undefined until written
}
// Builds the 256-entry color table as four 64-entry linear ramps, then
// overrides entry 0 with black.
void Bmp256::calc_palette() {
    for (int t = 0; t < 64; ++t) {
        const int r0 = 255 - 2 * t;
        palette[t].set(255, 255 - 4 * t, 0);
        palette[t + 64].set(r0, 0, 2 * t);
        palette[t + 128].set(r0 - 128, 0, 128 + 2 * t);
        palette[t + 192].set(0, 0, 255 - 3 * t);
    }
    palette[0].set(0, 0, 0); // entry 0 is forced to black
}
#include <iostream>
#include <fstream>
// Writes the 54-byte header, the 256*4-byte palette, then each pixel row
// to file_name. NOTE(review): the stream is never checked for failure.
void Bmp256::save(const char* file_name) { // write the image to disk
std::ofstream of(file_name, std::ios::binary);
of.write((char *)&header, 54);
of.write((char *)palette, 256 * 4);
char* p = (char *)buffer;
for (int i = 0; i < header.biHeight; ++i) {
of.write(p, rowSize);
p += rowSize;
}
}
const double RMIN = -2, RMAX = 1, IMIN = -1, IMAX = 1;//
const int W = 12 * 1024; // 12*1024
const double RESN = W / (RMAX - RMIN); // 12*1024/1-(-2)=4*1024
const int H = (IMAX - IMIN) * RESN; // 1-(-1)*4*1024=8*1024
const int MI = 1;
// int Mandelbrot(complex c) { //
// complex z;
// for (int k = 256 * MI - 1; k >= 0; --k) {
// z = z * z + c;
// if (std::norm(z) > 4) return k / MI; //
// }
// return 0;
// }
#include <ctime>
#include <hip/hip_runtime.h>
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
// Escape-time Mandelbrot iteration for the pixel at (x, y).
// Maps the pixel into the complex plane via RMIN/IMIN/RESN, iterates
// z -> z^2 + c for up to 256*MI steps, and returns a palette index in
// [0, 255]: the (scaled) remaining-iteration count at escape, or 0 when
// the point never escapes.
__device__ int Mandelbrot( int x, int y ) {
    float jx = RMIN + x / RESN;
    float jy = IMIN + y / RESN;
    hipComplex a(0, 0);
    hipComplex c(jx, jy);
    // Removed the dead `int k = 256 * MI - 1;` that shadowed the loop
    // variable and was never read.
    for (int k = 256 * MI - 1; k >= 0; --k) {
        a = a * a + c;
        if (a.magnitude2() > 4) return k / MI;
    }
    return 0;
}
// One pixel per block: the launch uses grid dim3(W, H) with a single
// thread per block, so blockIdx alone addresses the pixel.
__global__ void kernel( uint8_t *ptr ) {
// map from threadIdx/BlockIdx to position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * W;
ptr[offset] = Mandelbrot( x, y ); // palette index in [0, 255]
}
// Renders a W x H Mandelbrot image on the GPU, saves it as an 8-bit BMP
// and reports the CPU-side wall time of the GPU portion.
int main() {
Bmp256 bmp(W, H);
clock_t t1 = clock();; // start timer (stray second ';' kept as-is)
uint8_t *dev_bitmap;
hipMalloc( (void**)&dev_bitmap,
bmp.image_size());
dim3 grid(W, H); // one 1-thread block per pixel; NOTE(review): underutilizes warps
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap );
hipMemcpy( bmp.get_ptr(),
dev_bitmap,
bmp.image_size(),
hipMemcpyDeviceToHost ); // blocking copy also synchronizes with the kernel
hipFree( dev_bitmap );
clock_t t2 = clock();;
bmp.save("Mandelbrot12k.bmp");
std::cout << "run time: " << (double)(t2 - t1) / CLOCKS_PER_SEC << " seconds.\n";
}
| 8716925ba700a8246e0faf1361120fa093c27508.cu | #include <stdint.h>
class Bmp256 { // custom image class: 8-bit (256-color) paletted BMP writer
#pragma pack(2) // pack members to n = 2 bytes so the struct matches the on-disk BMP header layout
struct Header { // BITMAPFILEHEADER followed by BITMAPINFOHEADER
uint16_t bfType = 0x4D42; // "BM" magic
uint32_t bfSize; // total file size in bytes (set in the constructor)
uint16_t bfReserved1 = 0;
uint16_t bfReserved2 = 0;
uint32_t bfOffBits = 54 + 256 * 4; // pixel data starts after the headers and the palette
uint32_t biSize = 40; // BITMAPINFOHEADER size
int32_t biWidth;
int32_t biHeight;
uint16_t biPlanes = 1;
uint16_t biBitCount = 8; // 8 bits per pixel (palette indices)
uint32_t biCompression = 0; // uncompressed
uint32_t biSizeImage = 0;
int32_t biXPelsPerMeter = 0;
int32_t biYPelsPerMeter = 0;
uint32_t biClrUsed = 256;
uint32_t biClrImportant = 256;
} header;
#pragma pack() // restore default packing (8)
int32_t rowSize; // bytes per pixel row; NOTE(review): set to width with no padding, so widths should be multiples of 4 (BMP rows are 4-byte aligned)
struct { // one BGRA palette entry, BMP on-disk order
uint8_t B, G, R, A;
void set(uint8_t r, uint8_t g, uint8_t b) { R = r; G = g; B = b; A = 0; } // assign an opaque RGB color
} palette[256]; // 256-color lookup table
uint8_t *buffer = NULL; // pixel buffer of palette indices, rowSize * height bytes
void calc_palette();
public:
Bmp256(int width, int height); // constructor: allocates the pixel buffer and builds the palette
~Bmp256() { delete[] buffer; } // destructor: releases the pixel buffer
int width() const { return header.biWidth; } // image width in pixels
int height() const { return header.biHeight; } // image height in pixels
uint8_t& operator()(int row, int col) { return buffer[row * rowSize + col]; } // get/set the pixel
void save(const char* file); // write the image to a BMP file
uint8_t* get_ptr() { return buffer; }; // raw pixel buffer (e.g. as a GPU copy target)
int image_size() { return header.bfSize - header.bfOffBits; }; // pixel buffer size in bytes
};
// Constructor: records the dimensions, builds the palette and allocates
// the (uninitialized) pixel buffer.
Bmp256::Bmp256(int width, int height) {
header.biWidth = width; // store the dimensions in the file header
header.biHeight = height;
rowSize = width; // one byte per pixel; NOTE(review): assumes width % 4 == 0 (BMP rows are 4-byte aligned)
int buffSize = rowSize * height; // total pixel bytes
header.bfSize = header.bfOffBits + buffSize;
calc_palette(); // initialize the 256-color palette
buffer = new uint8_t[buffSize]; // pixel buffer; contents are undefined until written
}
// Builds the 256-entry color table as four 64-entry linear ramps, then
// overrides entry 0 with black.
void Bmp256::calc_palette() {
    for (int t = 0; t < 64; ++t) {
        const int r0 = 255 - 2 * t;
        palette[t].set(255, 255 - 4 * t, 0);
        palette[t + 64].set(r0, 0, 2 * t);
        palette[t + 128].set(r0 - 128, 0, 128 + 2 * t);
        palette[t + 192].set(0, 0, 255 - 3 * t);
    }
    palette[0].set(0, 0, 0); // entry 0 is forced to black
}
#include <iostream>
#include <fstream>
// Writes the 54-byte header, the 256*4-byte palette, then each pixel row
// to file_name. NOTE(review): the stream is never checked for failure.
void Bmp256::save(const char* file_name) { // save the image to disk
std::ofstream of(file_name, std::ios::binary);
of.write((char *)&header, 54);
of.write((char *)palette, 256 * 4);
char* p = (char *)buffer;
for (int i = 0; i < header.biHeight; ++i) {
of.write(p, rowSize);
p += rowSize;
}
}
const double RMIN = -2, RMAX = 1, IMIN = -1, IMAX = 1;// 实部和虚部的范围
const int W = 12 * 1024; // 宽度:12*1024
const double RESN = W / (RMAX - RMIN); // 实部单位像素数12*1024/(1-(-2))=4*1024
const int H = (IMAX - IMIN) * RESN; // 高度:(1-(-1))*4*1024=8*1024
const int MI = 1;
// int Mandelbrot(complex c) { // 曼德博集合是一种在复平面上组成分形的点的集合
// complex z;
// for (int k = 256 * MI - 1; k >= 0; --k) {
// z = z * z + c;
// if (std::norm(z) > 4) return k / MI; //计算分形
// }
// return 0;
// }
#include <ctime>
#include <cuda_runtime.h>
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
};
// Escape-time Mandelbrot iteration for the pixel at (x, y).
// Maps the pixel into the complex plane via RMIN/IMIN/RESN, iterates
// z -> z^2 + c for up to 256*MI steps, and returns a palette index in
// [0, 255]: the (scaled) remaining-iteration count at escape, or 0 when
// the point never escapes.
__device__ int Mandelbrot( int x, int y ) {
    float jx = RMIN + x / RESN;
    float jy = IMIN + y / RESN;
    cuComplex a(0, 0);
    cuComplex c(jx, jy);
    // Removed the dead `int k = 256 * MI - 1;` that shadowed the loop
    // variable and was never read.
    for (int k = 256 * MI - 1; k >= 0; --k) {
        a = a * a + c;
        if (a.magnitude2() > 4) return k / MI;
    }
    return 0;
}
// One pixel per block: the launch uses grid dim3(W, H) with a single
// thread per block, so blockIdx alone addresses the pixel.
__global__ void kernel( uint8_t *ptr ) {
// map from threadIdx/BlockIdx to position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * W;
ptr[offset] = Mandelbrot( x, y ); // palette index in [0, 255]
}
// Renders a W x H Mandelbrot image on the GPU, saves it as an 8-bit BMP
// and reports the CPU-side wall time of the GPU portion.
int main() {
Bmp256 bmp(W, H);
clock_t t1 = clock();; // start timer (stray second ';' kept as-is)
uint8_t *dev_bitmap;
cudaMalloc( (void**)&dev_bitmap,
bmp.image_size());
dim3 grid(W, H); // one 1-thread block per pixel; NOTE(review): underutilizes warps
kernel<<<grid,1>>>( dev_bitmap );
cudaMemcpy( bmp.get_ptr(),
dev_bitmap,
bmp.image_size(),
cudaMemcpyDeviceToHost ); // blocking copy also synchronizes with the kernel
cudaFree( dev_bitmap );
clock_t t2 = clock();;
bmp.save("Mandelbrot12k.bmp");
std::cout << "run time: " << (double)(t2 - t1) / CLOCKS_PER_SEC << " seconds.\n";
}
|
3184dafd82a0abdd7c3daf62c9c122c1a31e272f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "structure.h"
// Error-checking helper: wrap HIP API calls in gpuErrchk(...) so failures
// are reported with file/line and, by default, abort the process.
#define gpuErrchk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file,
line);
if (abort)
exit(code); // exit with the HIP error code as the process status
}
}
static const int kThreads = 256;
using IndexT = int;
__managed__ NodeBase **dev_nodes;
__managed__ SpringBase **dev_springs;
// In-place initialization of a NodeBase: sets its position, marks it as a
// plain node with no attached springs, and clears every spring slot.
__device__ void new_NodeBase(NodeBase *node, float pos_x, float pos_y) {
  node->pos_x = pos_x;
  node->pos_y = pos_y;
  node->num_springs = 0;
  node->type = kTypeNodeBase;
  for (int slot = 0; slot != kMaxDegree; ++slot)
    node->springs[slot] = NULL;
}
__device__ void new_AnchorNode(NodeBase *node, float pos_x, float pos_y) {
new_NodeBase(node, pos_x, pos_y);
node->type = kTypeAnchorNode;
}
__device__ void new_AnchorPullNode(NodeBase *node, float pos_x, float pos_y,
float vel_x, float vel_y) {
new_AnchorNode(node, pos_x, pos_y);
node->vel_x = vel_x;
node->vel_y = vel_y;
node->type = kTypeAnchorPullNode;
}
__device__ void new_Node(NodeBase *node, float pos_x, float pos_y, float mass) {
new_NodeBase(node, pos_x, pos_y);
node->mass = mass;
node->type = kTypeNode;
}
// __device__ float NodeBase_distance_to(Node *node, Node *other) {
// float dx = node->pos_x - other->pos_x;
// float dy = node->pos_y - other->pos_y;
// float dist_sq = dx * dx + dy * dy;
// return sqrt(dist_sq);
// }
__device__ void NodeBase_add_spring(NodeBase *node, SpringBase *spring) {
assert(node != NULL);
int idx = atomicAdd(&node->num_springs, 1);
assert(idx + 1 <= kMaxDegree);
node->springs[idx] = spring;
// assert(spring->p1 == node || spring->p2 == node);
}
__device__ void new_Spring(SpringBase *spring, NodeBase *p1, NodeBase *p2,
float spring_factor, float max_force) {
spring->is_active = true;
spring->p1 = p1;
spring->p2 = p2;
spring->factor = spring_factor;
spring->force = 0.0f;
spring->max_force = max_force;
spring->initial_length = p1->distance_to(p2);
spring->delete_flag = false;
// if (!(spring->initial_length > 0.0f))
// printf("%f \n", spring->initial_length);
assert(spring->initial_length > 0.0f);
NodeBase_add_spring(p1, spring);
NodeBase_add_spring(p2, spring);
}
__device__ void NodeBase_remove_spring(NodeBase *node, SpringBase *spring) {
for (int i = 0; i < kMaxDegree; ++i) {
if (node->springs[i] == spring) {
node->springs[i] = NULL;
if (atomicSub(&node->num_springs, 1) == 1) {
// Deleted last spring.
node->type = 0;
}
return;
}
}
// Spring not found.
assert(false);
}
__device__ void AnchorPullNode_pull(NodeBase *node) {
node->pos_x += node->vel_x * kDt;
node->pos_y += node->vel_y * kDt;
}
__device__ void Spring_self_destruct(SpringBase *spring) {
NodeBase_remove_spring(spring->get_p1(), spring);
NodeBase_remove_spring(spring->get_p2(), spring);
spring->is_active = false;
}
// Recomputes a spring's force from its current elongation, and tears the
// spring (detaching it from both endpoints and deactivating it) once the
// force limit is reached.
__device__ void Spring_compute_force(SpringBase *spring) {
float dist = spring->get_p1()->distance_to(spring->get_p2());
float displacement = max(0.0f, dist - spring->get_init_len()); // compression exerts no force
spring->update_force(displacement);
if (spring->is_max_force()) {
// Over the limit: detach from both nodes, then deactivate.
spring->get_p1()->remove_spring(spring);
spring->get_p2()->remove_spring(spring);
spring->deactivate();
// Spring_self_destruct(spring);
}
}
// Integrates one time step for a movable node: sums the force
// contributions of all attached springs (each directed along the unit
// vector toward the neighboring node) and then updates the node's
// velocity and position from the accumulated force.
__device__ void Node_move(NodeBase *node) {
float force_x = 0.0f;
float force_y = 0.0f;
for (int i = 0; i < kMaxDegree; ++i) {
SpringBase *s = node->spring(i);
if (s != NULL) {
NodeBase *from;
NodeBase *to;
// Orient the spring so that `from` is this node and `to` its neighbor.
if (s->get_p1() == node) {
from = node;
to = s->get_p2();
} else {
assert(s->get_p2() == node);
from = node;
to = s->get_p1( );
}
// Calculate unit vector.
float dist = to->distance_to(from);
float unit_x = to->unit_x(from, dist);
float unit_y = to->unit_y(from, dist);
// Apply force.
force_x += unit_x * s->get_force();
force_y += unit_y * s->get_force();
}
}
// Calculate new velocity and position.
node->update_vel_x(force_x);
node->update_vel_y(force_y);
node->update_pos_x(force_x);
node->update_pos_y(force_y);
}
__device__ void NodeBase_initialize_bfs(NodeBase *node) {
if (node->type == kTypeAnchorNode) {
node->set_distance(0);
} else {
node->set_distance(kMaxDistance); // should be int_max
}
}
__device__ bool dev_bfs_continue;
// One relaxation round of the breadth-first reachability pass: a node
// whose recorded distance equals `distance` marks every still-unvisited
// neighbor (distance == kMaxDistance) with distance + 1 and requests
// another round via dev_bfs_continue.
__device__ void NodeBase_bfs_visit(NodeBase *node, int distance) {
if (distance == node->get_distance()) {
// Continue until all vertices were visited.
dev_bfs_continue = true; // plain store; every writer stores `true`, so the race is benign
for (int i = 0; i < kMaxDegree; ++i) {
SpringBase *spring = node->spring(i);
if (spring != NULL) {
// Find neighboring vertices.
NodeBase *n;
if (node == spring->get_p1()) {
n = spring->get_p2();
} else {
n = spring->get_p1();
}
if (n->get_distance() == kMaxDistance) {
// Set distance on neighboring vertex if unvisited.
n->set_distance(distance + 1);
}
}
}
}
}
__device__ void Spring_bfs_delete(SpringBase *spring) {
if (spring->delete_flag) {
spring->get_p1()->remove_spring(spring);
spring->get_p2()->remove_spring(spring);
spring->deactivate();
}
}
__device__ void NodeBase_bfs_set_delete_flags(NodeBase *node) {
if (node->distance == kMaxDistance) { // should be int_max
for (int i = 0; i < kMaxDegree; ++i) {
SpringBase *spring = node->spring(i);
if (spring != NULL) {
spring->delete_flag = true;
//Spring_bfs_delete(spring);
}
}
}
}
// Only for rendering and checksum computation.
__device__ int dev_num_springs;
__device__ SpringInfo dev_spring_info[kMaxSprings];
int host_num_springs;
SpringInfo host_spring_info[kMaxSprings];
__device__ void Spring_add_to_rendering_array(SpringBase *spring) {
int idx = atomicAdd(&dev_num_springs, 1);
dev_spring_info[idx].p1_x = spring->p1->pos_x;
dev_spring_info[idx].p1_y = spring->p1->pos_y;
dev_spring_info[idx].p2_x = spring->p2->pos_x;
dev_spring_info[idx].p2_y = spring->p2->pos_y;
dev_spring_info[idx].force = spring->force;
dev_spring_info[idx].max_force = spring->max_force;
}
__global__ void kernel_AnchorPullNode_pull() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type == kTypeAnchorPullNode) {
dev_nodes[i]->pull();
}
}
}
__global__ void kernel_Node_move() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type == kTypeNode) {
Node_move(dev_nodes[i]);
}
}
}
__global__ void kernel_NodeBase_initialize_bfs() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type != 0) {
NodeBase_initialize_bfs(dev_nodes[i]);
}
}
}
__global__ void kernel_NodeBase_bfs_visit(int dist) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type != 0) {
NodeBase_bfs_visit(dev_nodes[i], dist);
}
}
}
__global__ void kernel_NodeBase_bfs_set_delete_flags() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type != 0) {
NodeBase_bfs_set_delete_flags(dev_nodes[i]);
}
}
}
__global__ void kernel_Spring_compute_force() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings;
i += blockDim.x * gridDim.x) {
if (dev_springs[i]->get_is_active()) {
Spring_compute_force(dev_springs[i]);
}
}
}
__global__ void kernel_Spring_bfs_delete() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings;
i += blockDim.x * gridDim.x) {
if (dev_springs[i]->get_is_active()) {
Spring_bfs_delete(dev_springs[i]);
}
}
}
__global__ void kernel_Spring_add_to_rendering_array() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings;
i += blockDim.x * gridDim.x) {
if (dev_springs[i]->get_is_active()) {
Spring_add_to_rendering_array(dev_springs[i]);
}
}
}
void initialize_nodes(obj_alloc *alloc) {
for (int i = 0; i < kMaxNodes; i += 1) {
dev_nodes[i] = (NodeBase *)alloc->my_new<Node>();
// assert(dev_nodes[i] != NULL);
dev_nodes[i]->type = 0;
}
}
void initialize_springs(obj_alloc *alloc) {
for (int i = 0; i < kMaxSprings; i += 1) {
dev_springs[i] = (SpringBase *)alloc->my_new<Spring>();
assert(dev_springs[i] != NULL);
dev_springs[i]->is_active = false;
}
}
void transfer_data() {
int zero = 0;
hipMemcpyToSymbol(dev_num_springs, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Spring_add_to_rendering_array), dim3(128), dim3(128), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(&host_num_springs, dev_num_springs, sizeof(int), 0,
hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(host_spring_info, dev_spring_info,
sizeof(SpringInfo) * host_num_springs, 0,
hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
}
float checksum() {
transfer_data();
float result = 0.0f;
for (int i = 0; i < host_num_springs; ++i) {
result += host_spring_info[i].p1_x * host_spring_info[i].p2_y *
host_spring_info[i].force;
}
return result;
}
void compute() {
hipLaunchKernelGGL(( kernel_Spring_compute_force), dim3((kMaxSprings + kThreads - 1) / kThreads),
dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Node_move), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
// Level-synchronous BFS from the anchor nodes, then removal of everything
// the BFS could not reach (springs whose nodes became disconnected).
void bfs_and_delete() {
// Perform BFS to check reachability.
// Seed: anchors get distance 0, all other nodes kMaxDistance.
hipLaunchKernelGGL(( kernel_NodeBase_initialize_bfs), dim3((kMaxNodes + kThreads - 1) / kThreads),
dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
// One kernel launch per BFS level; dev_bfs_continue is set by any thread
// that found frontier work this level, so the loop stops early once the
// frontier is empty (capped at kMaxDistance levels).
for (int i = 0; i < kMaxDistance; ++i) {
bool continue_flag = false;
hipMemcpyToSymbol(dev_bfs_continue, &continue_flag, sizeof(bool), 0,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_NodeBase_bfs_visit), dim3((kMaxNodes + kThreads - 1) / kThreads),
dim3(kThreads), 0, 0, i);
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(&continue_flag, dev_bfs_continue, sizeof(bool), 0,
hipMemcpyDeviceToHost);
if (!continue_flag) break;
}
// Delete springs (and nodes).
// Unreached nodes (still at kMaxDistance) flag their springs for deletion,
// then a second pass detaches and deactivates the flagged springs.
hipLaunchKernelGGL(( kernel_NodeBase_bfs_set_delete_flags),
dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Spring_bfs_delete), dim3((kMaxSprings + kThreads - 1) / kThreads),
dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
// One simulation step: move the pulling anchors, relax the spring system for
// kNumComputeIterations substeps, then garbage-collect unreachable geometry.
void step() {
hipLaunchKernelGGL(( kernel_AnchorPullNode_pull), dim3((kMaxNodes + kThreads - 1) / kThreads),
dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
for (int i = 0; i < kNumComputeIterations; ++i) {
compute();
}
bfs_and_delete();
}
// One-shot pool setup: pre-allocate every node and every spring object.
void initialize_memory(obj_alloc *alloc) {
    initialize_nodes(alloc);
    initialize_springs(alloc);
}
__device__ IndexT dev_tmp_nodes[kMaxNodes];
__device__ IndexT dev_node_counter;
// Instantiate simulation nodes from the raw dataset records. Each thread
// claims a pool slot via atomicAdd and records the dataset-index -> pool-index
// mapping in dev_tmp_nodes, which kernel_create_springs consumes afterwards.
__global__ void kernel_create_nodes(DsNode *nodes, int num_nodes) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_nodes;
i += blockDim.x * gridDim.x) {
int idx = atomicAdd(&dev_node_counter, 1);
assert(dev_nodes != NULL);
dev_tmp_nodes[i] = idx;
assert(dev_nodes[idx] != NULL);
// Dispatch on the record's type tag; an unknown tag is a dataset bug.
if (nodes[i].type == kTypeNode) {
new_Node(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y,
nodes[i].mass);
} else if (nodes[i].type == kTypeAnchorPullNode) {
new_AnchorPullNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y,
nodes[i].vel_x, nodes[i].vel_y);
} else if (nodes[i].type == kTypeAnchorNode) {
new_AnchorNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y);
} else {
assert(false);
}
}
}
// Instantiate springs from the raw dataset records. Endpoint indices are
// dataset node indices, translated to pool slots through dev_tmp_nodes
// (filled by kernel_create_nodes, which must have completed first).
__global__ void kernel_create_springs(DsSpring *springs, int num_springs) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_springs;
i += blockDim.x * gridDim.x) {
assert(dev_springs[i] != nullptr);
new_Spring(dev_springs[i], dev_nodes[dev_tmp_nodes[springs[i].p1]],
dev_nodes[dev_tmp_nodes[springs[i].p2]],
springs[i].spring_factor, springs[i].max_force);
// printf("%p \n", dev_springs[i]);
}
}
// Upload the dataset to the device and instantiate the object graph:
// 1) copy the raw DsNode/DsSpring records into temporary device buffers,
// 2) reset the node-pool counter,
// 3) run the creation kernels (nodes first -- springs consume the
//    dev_tmp_nodes index mapping the node kernel produces).
// Fix: every hip* call is now checked via gpuErrchk, consistent with the
// rest of the file, and the buffers are named d_* (they are device memory,
// despite the original "host_*" names).
void load_dataset(Dataset &dataset) {
    DsNode *d_ds_nodes;
    gpuErrchk(hipMalloc(&d_ds_nodes, sizeof(DsNode) * dataset.nodes.size()));
    gpuErrchk(hipMemcpy(d_ds_nodes, dataset.nodes.data(),
                        sizeof(DsNode) * dataset.nodes.size(),
                        hipMemcpyHostToDevice));
    DsSpring *d_ds_springs;
    gpuErrchk(hipMalloc(&d_ds_springs,
                        sizeof(DsSpring) * dataset.springs.size()));
    gpuErrchk(hipMemcpy(d_ds_springs, dataset.springs.data(),
                        sizeof(DsSpring) * dataset.springs.size(),
                        hipMemcpyHostToDevice));
    gpuErrchk(hipDeviceSynchronize());
    // Node creation claims pool slots via this counter; start from zero.
    IndexT zero = 0;
    gpuErrchk(hipMemcpyToSymbol(dev_node_counter, &zero, sizeof(IndexT), 0,
                                hipMemcpyHostToDevice));
    gpuErrchk(hipDeviceSynchronize());
    assert(dataset.nodes.size() == kMaxNodes);
    hipLaunchKernelGGL((kernel_create_nodes),
                       dim3((kMaxNodes + kThreads - 1) / kThreads),
                       dim3(kThreads), 0, 0,
                       d_ds_nodes, dataset.nodes.size());
    gpuErrchk(hipDeviceSynchronize());
    hipLaunchKernelGGL((kernel_create_springs),
                       dim3((kMaxSprings + kThreads - 1) / kThreads),
                       dim3(kThreads), 0, 0,
                       d_ds_springs, dataset.springs.size());
    gpuErrchk(hipDeviceSynchronize());
    gpuErrchk(hipFree(d_ds_nodes));
    gpuErrchk(hipFree(d_ds_springs));
}
// Entry point. argv[1] is forwarded to obj_alloc as the allocation parameter.
// Allocates the node/spring pools, patches device vtables, loads a random
// dataset, runs kNumSteps simulation steps, and prints timings.
// Fixes: argv[1] was dereferenced without checking argc; the elapsed-time
// printf used "%lu" for a signed 64-bit chrono rep (format-string UB).
int main(int argc, char **argv) {
    if (argc < 2) {
        printf("usage: %s <obj_alloc_size>\n", argv[0]);
        return 1;
    }
    // Allocate memory.
    hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
    mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
    obj_alloc my_obj_alloc(&shared_mem, atoll(argv[1]));
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    dev_nodes = (NodeBase **)my_obj_alloc.calloc<NodeBase *>(kMaxNodes);
    dev_springs = (SpringBase **)my_obj_alloc.calloc<SpringBase *>(kMaxSprings);
    initialize_memory(&my_obj_alloc);
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    // Patch device-side vtable pointers so virtual dispatch works on the GPU.
    my_obj_alloc.toDevice();
    high_resolution_clock::time_point t3 = high_resolution_clock::now();
    duration<double> alloc_time = duration_cast<duration<double>>(t2 - t1);
    duration<double> vptr_time = duration_cast<duration<double>>(t3 - t2);
    printf("alloc_time : %f \nvptr patching : %f \n", alloc_time.count(),
           vptr_time.count());
    printf("number of objs:%d\n", kMaxNodes + kMaxSprings);
    Dataset dataset;
    random_dataset(dataset);
    load_dataset(dataset);
    auto time_start = std::chrono::system_clock::now();
    for (int i = 0; i < kNumSteps; ++i) {
#ifndef NDEBUG
        printf("%i\n", i);
#endif  // NDEBUG
        step();
    }
    auto time_end = std::chrono::system_clock::now();
    auto elapsed = time_end - time_start;
    auto micros =
        std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
    // count() returns a signed 64-bit integer; match the format explicitly.
    printf("%lld\n", (long long)micros);
#ifndef NDEBUG
    printf("Checksum: %f\n", checksum());
#endif  // NDEBUG
}
| 3184dafd82a0abdd7c3daf62c9c122c1a31e272f.cu |
#include "structure.h"
// Error-check helper: wrap any CUDA runtime call; on failure, report the
// error string with the call site and (by default) terminate the process.
#define gpuErrchk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, const int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
line);
if (abort)
exit(code);
}
}
static const int kThreads = 256;
using IndexT = int;
__managed__ NodeBase **dev_nodes;
__managed__ SpringBase **dev_springs;
__device__ void new_NodeBase(NodeBase *node, float pos_x, float pos_y) {
node->pos_x = pos_x;
node->pos_y = pos_y;
node->num_springs = 0;
node->type = kTypeNodeBase;
for (int i = 0; i < kMaxDegree; ++i) {
node->springs[i] = NULL;
}
}
// Anchor node: a NodeBase whose type tag marks it as immovable.
__device__ void new_AnchorNode(NodeBase *node, float pos_x, float pos_y) {
    new_NodeBase(node, pos_x, pos_y);
    node->type = kTypeAnchorNode;  // override the base tag
}
// Pulling anchor: an anchor node with a fixed velocity applied each step.
__device__ void new_AnchorPullNode(NodeBase *node, float pos_x, float pos_y,
                                   float vel_x, float vel_y) {
    new_AnchorNode(node, pos_x, pos_y);
    node->type = kTypeAnchorPullNode;  // override the anchor tag
    node->vel_x = vel_x;
    node->vel_y = vel_y;
}
__device__ void new_Node(NodeBase *node, float pos_x, float pos_y, float mass) {
new_NodeBase(node, pos_x, pos_y);
node->mass = mass;
node->type = kTypeNode;
}
// __device__ float NodeBase_distance_to(Node *node, Node *other) {
// float dx = node->pos_x - other->pos_x;
// float dy = node->pos_y - other->pos_y;
// float dist_sq = dx * dx + dy * dy;
// return sqrt(dist_sq);
// }
// Append `spring` to the node's adjacency list. The slot is claimed with
// atomicAdd so concurrent spring constructions may register with the same
// node; the assert enforces the kMaxDegree adjacency cap.
__device__ void NodeBase_add_spring(NodeBase *node, SpringBase *spring) {
assert(node != NULL);
int idx = atomicAdd(&node->num_springs, 1);
assert(idx + 1 <= kMaxDegree);
node->springs[idx] = spring;
// assert(spring->p1 == node || spring->p2 == node);
}
__device__ void new_Spring(SpringBase *spring, NodeBase *p1, NodeBase *p2,
float spring_factor, float max_force) {
spring->is_active = true;
spring->p1 = p1;
spring->p2 = p2;
spring->factor = spring_factor;
spring->force = 0.0f;
spring->max_force = max_force;
spring->initial_length = p1->distance_to(p2);
spring->delete_flag = false;
// if (!(spring->initial_length > 0.0f))
// printf("%f \n", spring->initial_length);
assert(spring->initial_length > 0.0f);
NodeBase_add_spring(p1, spring);
NodeBase_add_spring(p2, spring);
}
// Unlink `spring` from the node's adjacency list (linear scan over the
// fixed-size slot array). atomicSub returns the *old* count, so a return
// value of 1 means this was the node's last spring: the node is retired by
// clearing its type tag. Asserts if the spring was never attached.
__device__ void NodeBase_remove_spring(NodeBase *node, SpringBase *spring) {
for (int i = 0; i < kMaxDegree; ++i) {
if (node->springs[i] == spring) {
node->springs[i] = NULL;
if (atomicSub(&node->num_springs, 1) == 1) {
// Deleted last spring.
node->type = 0;
}
return;
}
}
// Spring not found.
assert(false);
}
// Advance a pulling anchor along its fixed velocity by one timestep kDt.
__device__ void AnchorPullNode_pull(NodeBase *node) {
    const float dx = node->vel_x * kDt;
    const float dy = node->vel_y * kDt;
    node->pos_x += dx;
    node->pos_y += dy;
}
// Detach the spring from both endpoints and deactivate it.
// NOTE(review): the only visible call site (in Spring_compute_force) is
// commented out; the inline equivalent is used there instead.
__device__ void Spring_self_destruct(SpringBase *spring) {
NodeBase_remove_spring(spring->get_p1(), spring);
NodeBase_remove_spring(spring->get_p2(), spring);
spring->is_active = false;
}
// Recompute this spring's force from its current elongation. Displacement is
// clamped at zero, so only stretching (never compression) produces force.
// The force law itself lives in update_force() (defined elsewhere —
// presumably factor * displacement; confirm against the class definition).
// A spring that exceeds its max force snaps: it detaches from both
// endpoints and deactivates.
__device__ void Spring_compute_force(SpringBase *spring) {
float dist = spring->get_p1()->distance_to(spring->get_p2());
float displacement = max(0.0f, dist - spring->get_init_len());
spring->update_force(displacement);
if (spring->is_max_force()) {
spring->get_p1()->remove_spring(spring);
spring->get_p2()->remove_spring(spring);
spring->deactivate();
// Spring_self_destruct(spring);
}
}
__device__ void Node_move(NodeBase *node) {
float force_x = 0.0f;
float force_y = 0.0f;
for (int i = 0; i < kMaxDegree; ++i) {
SpringBase *s = node->spring(i);
if (s != NULL) {
NodeBase *from;
NodeBase *to;
if (s->get_p1() == node) {
from = node;
to = s->get_p2();
} else {
assert(s->get_p2() == node);
from = node;
to = s->get_p1( );
}
// Calculate unit vector.
float dist = to->distance_to(from);
float unit_x = to->unit_x(from, dist);
float unit_y = to->unit_y(from, dist);
// Apply force.
force_x += unit_x * s->get_force();
force_y += unit_y * s->get_force();
}
}
// Calculate new velocity and position.
node->update_vel_x(force_x);
node->update_vel_y(force_y);
node->update_pos_x(force_x);
node->update_pos_y(force_y);
}
// BFS seed: anchors start the traversal at distance 0; every other node is
// marked unvisited with kMaxDistance (a stand-in for "infinity").
__device__ void NodeBase_initialize_bfs(NodeBase *node) {
    const bool is_anchor = (node->type == kTypeAnchorNode);
    node->set_distance(is_anchor ? 0 : kMaxDistance);
}
__device__ bool dev_bfs_continue;
// One BFS level: nodes whose distance equals the current level form the
// frontier. Each frontier node relaxes its unvisited neighbors to
// distance + 1 and signals the host (dev_bfs_continue) to run another level.
// Concurrent frontier threads may write the same neighbor, but they all
// write the identical value distance + 1, so the race is benign.
__device__ void NodeBase_bfs_visit(NodeBase *node, int distance) {
if (distance == node->get_distance()) {
// Continue until all vertices were visited.
dev_bfs_continue = true;
for (int i = 0; i < kMaxDegree; ++i) {
SpringBase *spring = node->spring(i);
if (spring != NULL) {
// Find neighboring vertices.
NodeBase *n;
if (node == spring->get_p1()) {
n = spring->get_p2();
} else {
n = spring->get_p1();
}
if (n->get_distance() == kMaxDistance) {
// Set distance on neighboring vertex if unvisited.
n->set_distance(distance + 1);
}
}
}
}
}
// Second deletion pass: springs flagged during BFS reachability analysis
// are detached from both endpoints and deactivated.
__device__ void Spring_bfs_delete(SpringBase *spring) {
    if (!spring->delete_flag) return;
    spring->get_p1()->remove_spring(spring);
    spring->get_p2()->remove_spring(spring);
    spring->deactivate();
}
// First deletion pass: a node still at kMaxDistance after BFS is unreachable
// from every anchor, so all of its incident springs are flagged for removal
// (the actual detach happens in Spring_bfs_delete).
// Consistency fix: use the get_distance() accessor like the other BFS
// functions (NodeBase_bfs_visit) instead of reading the field directly.
__device__ void NodeBase_bfs_set_delete_flags(NodeBase *node) {
    if (node->get_distance() == kMaxDistance) {  // unreachable node
        for (int i = 0; i < kMaxDegree; ++i) {
            SpringBase *spring = node->spring(i);
            if (spring != NULL) {
                spring->delete_flag = true;
                // Spring_bfs_delete(spring);
            }
        }
    }
}
// Only for rendering and checksum computation.
__device__ int dev_num_springs;
__device__ SpringInfo dev_spring_info[kMaxSprings];
int host_num_springs;
SpringInfo host_spring_info[kMaxSprings];
__device__ void Spring_add_to_rendering_array(SpringBase *spring) {
int idx = atomicAdd(&dev_num_springs, 1);
dev_spring_info[idx].p1_x = spring->p1->pos_x;
dev_spring_info[idx].p1_y = spring->p1->pos_y;
dev_spring_info[idx].p2_x = spring->p2->pos_x;
dev_spring_info[idx].p2_y = spring->p2->pos_y;
dev_spring_info[idx].force = spring->force;
dev_spring_info[idx].max_force = spring->max_force;
}
// ---- Grid-stride kernel wrappers -------------------------------------------
// Each wrapper walks the full object pool with a grid-stride loop and applies
// one device function to the objects that pass its type/activity filter.
// Advance every pulling anchor by one timestep.
__global__ void kernel_AnchorPullNode_pull() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type == kTypeAnchorPullNode) {
dev_nodes[i]->pull();
}
}
}
// Integrate positions of all free (movable) nodes.
__global__ void kernel_Node_move() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type == kTypeNode) {
Node_move(dev_nodes[i]);
}
}
}
// Seed BFS distances on every live node (type 0 marks a retired node).
__global__ void kernel_NodeBase_initialize_bfs() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type != 0) {
NodeBase_initialize_bfs(dev_nodes[i]);
}
}
}
// Expand one BFS level (frontier = nodes at distance `dist`).
__global__ void kernel_NodeBase_bfs_visit(int dist) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type != 0) {
NodeBase_bfs_visit(dev_nodes[i], dist);
}
}
}
// Flag springs of unreachable nodes for deletion.
__global__ void kernel_NodeBase_bfs_set_delete_flags() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes;
i += blockDim.x * gridDim.x) {
if (dev_nodes[i]->type != 0) {
NodeBase_bfs_set_delete_flags(dev_nodes[i]);
}
}
}
// Recompute forces for all active springs.
__global__ void kernel_Spring_compute_force() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings;
i += blockDim.x * gridDim.x) {
if (dev_springs[i]->get_is_active()) {
Spring_compute_force(dev_springs[i]);
}
}
}
// Detach and deactivate springs that were flagged for deletion.
__global__ void kernel_Spring_bfs_delete() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings;
i += blockDim.x * gridDim.x) {
if (dev_springs[i]->get_is_active()) {
Spring_bfs_delete(dev_springs[i]);
}
}
}
// Export every active spring into the rendering/checksum array.
__global__ void kernel_Spring_add_to_rendering_array() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings;
i += blockDim.x * gridDim.x) {
if (dev_springs[i]->get_is_active()) {
Spring_add_to_rendering_array(dev_springs[i]);
}
}
}
void initialize_nodes(obj_alloc *alloc) {
for (int i = 0; i < kMaxNodes; i += 1) {
dev_nodes[i] = (NodeBase *)alloc->my_new<Node>();
// assert(dev_nodes[i] != NULL);
dev_nodes[i]->type = 0;
}
}
void initialize_springs(obj_alloc *alloc) {
for (int i = 0; i < kMaxSprings; i += 1) {
dev_springs[i] = (SpringBase *)alloc->my_new<Spring>();
assert(dev_springs[i] != NULL);
dev_springs[i]->is_active = false;
}
}
void transfer_data() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_springs, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
gpuErrchk(cudaDeviceSynchronize());
kernel_Spring_add_to_rendering_array<<<128, 128>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(&host_num_springs, dev_num_springs, sizeof(int), 0,
cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(host_spring_info, dev_spring_info,
sizeof(SpringInfo) * host_num_springs, 0,
cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
}
float checksum() {
transfer_data();
float result = 0.0f;
for (int i = 0; i < host_num_springs; ++i) {
result += host_spring_info[i].p1_x * host_spring_info[i].p2_y *
host_spring_info[i].force;
}
return result;
}
void compute() {
kernel_Spring_compute_force<<<(kMaxSprings + kThreads - 1) / kThreads,
kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Node_move<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void bfs_and_delete() {
// Perform BFS to check reachability.
kernel_NodeBase_initialize_bfs<<<(kMaxNodes + kThreads - 1) / kThreads,
kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
for (int i = 0; i < kMaxDistance; ++i) {
bool continue_flag = false;
cudaMemcpyToSymbol(dev_bfs_continue, &continue_flag, sizeof(bool), 0,
cudaMemcpyHostToDevice);
kernel_NodeBase_bfs_visit<<<(kMaxNodes + kThreads - 1) / kThreads,
kThreads>>>(i);
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(&continue_flag, dev_bfs_continue, sizeof(bool), 0,
cudaMemcpyDeviceToHost);
if (!continue_flag) break;
}
// Delete springs (and nodes).
kernel_NodeBase_bfs_set_delete_flags<<<
(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Spring_bfs_delete<<<(kMaxSprings + kThreads - 1) / kThreads,
kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void step() {
kernel_AnchorPullNode_pull<<<(kMaxNodes + kThreads - 1) / kThreads,
kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
for (int i = 0; i < kNumComputeIterations; ++i) {
compute();
}
bfs_and_delete();
}
void initialize_memory(obj_alloc *alloc) {
initialize_nodes(alloc);
initialize_springs(alloc);
}
__device__ IndexT dev_tmp_nodes[kMaxNodes];
__device__ IndexT dev_node_counter;
__global__ void kernel_create_nodes(DsNode *nodes, int num_nodes) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_nodes;
i += blockDim.x * gridDim.x) {
int idx = atomicAdd(&dev_node_counter, 1);
assert(dev_nodes != NULL);
dev_tmp_nodes[i] = idx;
assert(dev_nodes[idx] != NULL);
if (nodes[i].type == kTypeNode) {
new_Node(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y,
nodes[i].mass);
} else if (nodes[i].type == kTypeAnchorPullNode) {
new_AnchorPullNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y,
nodes[i].vel_x, nodes[i].vel_y);
} else if (nodes[i].type == kTypeAnchorNode) {
new_AnchorNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y);
} else {
assert(false);
}
}
}
__global__ void kernel_create_springs(DsSpring *springs, int num_springs) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_springs;
i += blockDim.x * gridDim.x) {
assert(dev_springs[i] != nullptr);
new_Spring(dev_springs[i], dev_nodes[dev_tmp_nodes[springs[i].p1]],
dev_nodes[dev_tmp_nodes[springs[i].p2]],
springs[i].spring_factor, springs[i].max_force);
// printf("%p \n", dev_springs[i]);
}
}
void load_dataset(Dataset &dataset) {
DsNode *host_nodes;
cudaMalloc(&host_nodes, sizeof(DsNode) * dataset.nodes.size());
cudaMemcpy(host_nodes, dataset.nodes.data(),
sizeof(DsNode) * dataset.nodes.size(), cudaMemcpyHostToDevice);
DsSpring *host_springs;
cudaMalloc(&host_springs, sizeof(DsSpring) * dataset.springs.size());
cudaMemcpy(host_springs, dataset.springs.data(),
sizeof(DsSpring) * dataset.springs.size(),
cudaMemcpyHostToDevice);
gpuErrchk(cudaDeviceSynchronize());
IndexT zero = 0;
cudaMemcpyToSymbol(dev_node_counter, &zero, sizeof(IndexT), 0,
cudaMemcpyHostToDevice);
gpuErrchk(cudaDeviceSynchronize());
assert(dataset.nodes.size() == kMaxNodes);
// kernel_create_nodes1<<<(kMaxNodes + kThreads - 1) / kThreads,
// kThreads>>>(
// host_nodes, dataset.nodes.size());
kernel_create_nodes<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>(
host_nodes, dataset.nodes.size());
gpuErrchk(cudaDeviceSynchronize());
// kernel_create_spring1<<<(kMaxSprings + kThreads - 1) / kThreads,
// kThreads>>>(
// host_nodes, dataset.springs.size());
kernel_create_springs<<<(kMaxSprings + kThreads - 1) / kThreads,
kThreads>>>(host_springs, dataset.springs.size());
gpuErrchk(cudaDeviceSynchronize());
cudaFree(host_nodes);
cudaFree(host_springs);
}
int main(int /*argc*/, char ** argv) {
// Allocate memory.
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[1]));
high_resolution_clock::time_point t1 = high_resolution_clock::now();
dev_nodes = (NodeBase **) my_obj_alloc.calloc<NodeBase *> (kMaxNodes);
dev_springs = (SpringBase **) my_obj_alloc.calloc<SpringBase *> (kMaxSprings);
// cudaMalloc(&dev_nodes, sizeof(NodeBase *) * kMaxNodes);
// cudaMemcpyToSymbol(dev_nodes, &host_nodes, sizeof(Node *), 0,
// cudaMemcpyHostToDevice);
// assert(dev_nodes != NULL);
// printf("%p\n", dev_nodes);
// Spring *host_springs;
// cudaMalloc(&dev_springs, sizeof(SpringBase *) * kMaxSprings);
// cudaMemcpyToSymbol(dev_springs, &host_springs, sizeof(Spring *), 0,
// cudaMemcpyHostToDevice);
initialize_memory(&my_obj_alloc);
high_resolution_clock::time_point t2 = high_resolution_clock::now();
my_obj_alloc.toDevice();
high_resolution_clock::time_point t3 = high_resolution_clock::now();
duration<double> alloc_time = duration_cast<duration<double>>(t2 - t1);
duration<double> vptr_time = duration_cast<duration<double>>(t3 - t2);
printf("alloc_time : %f \nvptr patching : %f \n",alloc_time.count(),vptr_time.count() );
printf("number of objs:%d\n", kMaxNodes + kMaxSprings);
Dataset dataset;
random_dataset(dataset);
load_dataset(dataset);
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumSteps; ++i) {
#ifndef NDEBUG
printf("%i\n", i);
#endif // NDEBUG
step();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
printf("%lu\n", micros);
#ifndef NDEBUG
printf("Checksum: %f\n", checksum());
#endif // NDEBUG
}
|
588e7cd1b82cfa782077a819ea7adc675598d24d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <iomanip>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// #define imin(a, b) (a<b?a:b)
//X,Y,Z
const int dimX = 10;
const int dimY = 10;
const int dimZ = 10;
const int SIZE = dimX * dimY * dimZ;
//
const int threadPerBlock = 32;
//
const int times = 90;
//stencil
__device__ const float BORDER = 0.0;
int count = 0;
//,
// Number of blocks needed to cover `dim` elements with `threadPerBlock`
// threads per block. Idiom fix: the divide-then-maybe-increment pair is the
// standard ceiling division, written as such.
int blockPerGrid(const int dim, const int threadPerBlock)
{
    return (dim + threadPerBlock - 1) / threadPerBlock;
}
//
// Error-check helpers: CHECK_ERROR wraps a runtime call's return code,
// CHECK_STATE polls the sticky last-error (use after a kernel launch).
// Both abort the process with file/line context on failure.
#define CHECK_ERROR(error) checkCudaError(error, __FILE__, __LINE__)
#define CHECK_STATE(msg) checkCudaState(msg, __FILE__, __LINE__)
inline void checkCudaError(hipError_t error, const char *file, const int line)
{
if (error != hipSuccess) {
std::cerr << "CUDA CALL FAILED:" << file << "( " << line << ")- " << hipGetErrorString(error) << std::endl;
exit(EXIT_FAILURE);
}
}
inline void checkCudaState(const char *msg, const char *file, const int line)
{
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
std::cerr << "---" << msg << " Error---" << std::endl;
std::cerr << file << "( " << line << ")- " << hipGetErrorString(error) << std::endl;
exit(EXIT_FAILURE);
}
}
//xyz
// Linearize (x, y, z) into the flat grid array, x fastest. Each coordinate is
// wrapped modulo its dimension (periodic), so out-of-range indices alias
// in-range cells rather than going out of bounds.
__device__ __host__ int offset(int x, int y, int z)
{
return (((x + dimX) % dimX) + ((y + dimY) % dimY) * dimX + ((z + dimZ) % dimZ) * dimX * dimY);
}
// 7-point stencil: each cell becomes the average of itself and its six
// face neighbors; neighbors outside the domain contribute BORDER.
// Bug fix: the launch configuration uses 32x32x1 blocks over a 10x10x10
// domain, and offset() wraps indices modulo the dimensions, so unguarded
// out-of-range threads aliased valid cells and raced on their writes.
__global__ void kernel(double *dev_grid_in, double *dev_grid_out)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int z = threadIdx.z + blockIdx.z * blockDim.z;
    if (x >= dimX || y >= dimY || z >= dimZ) return;  // guard excess threads
    double center = dev_grid_in[offset(x, y, z)];
    double up = (z < (dimZ - 1)) ? dev_grid_in[offset(x, y, z + 1)] : BORDER;
    double down = (z > 0) ? dev_grid_in[offset(x, y, z - 1)] : BORDER;
    double west = (x > 0) ? dev_grid_in[offset(x - 1, y, z)] : BORDER;
    double east = (x < (dimX - 1)) ? dev_grid_in[offset(x + 1, y, z)] : BORDER;
    double south = (y > 0) ? dev_grid_in[offset(x, y - 1, z)] : BORDER;
    double north = (y < (dimY - 1)) ? dev_grid_in[offset(x, y + 1, z)] : BORDER;
    dev_grid_out[offset(x, y, z)] =
        (center + up + down + west + east + south + north) * (1.0 / 7.0);
}
//
// Initialize the grid: boundary cells (any coordinate at 0 — that's what
// x*y*z == 0 tests — or at its max) get 7, interior cells get 0.
// `count` is a file-scope counter bumped once per cell; it is only consumed
// by the commented-out debug initialization below.
void init(double *grid, int dimX, int dimY, int dimZ)
{
for (int z=0; z<dimZ; ++z) {
for (int y=0; y<dimY; ++y) {
for (int x=0; x<dimX; ++x) {
if ((x*y*z == 0) || (x == dimX-1) || (y == dimY-1) || (z == dimZ-1)) {
grid[offset(x, y, z)] = 7;
}
else {
grid[offset(x, y, z)] = 0;
// grid[offset(x, y, z)] = count;
}
count++;
}
}
}
}
// Dump the grid as z-slices: one tab-separated dimX x dimY table per slice,
// values with three decimal places.
void print(double *grid)
{
for (int z=0; z<dimZ; ++z) {
std::cout << z << ":\n\n";
for (int y=0; y<dimY; ++y) {
for (int x=0; x<dimX; ++x) {
std::cout << std::fixed << std::setprecision(3) << grid[offset(x, y, z)] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
}
// Print a section header "-----------<str>--------------"; when `test` is
// non-zero, echo its value on the following line.
void debug(int test, std::string str)
{
    std::cout << "-----------" << str << "--------------" << std::endl;
    if (test != 0) {
        std::cout << test << std::endl;
    }
}
// Driver: initialize the grid, run `times` stencil iterations on the GPU
// with ping-pong buffers, and report the result plus elapsed time
// (timing includes the host<->device transfers).
int main(void)
{
CHECK_ERROR(hipSetDevice(0));
// Block dims are capped at 1024 threads, so the z extent is 1.
// NOTE(review): 32x32 threads per block over a 10x10 slab launches many
// out-of-range threads; offset()'s modulo wrap makes them alias valid
// cells — confirm the kernel guards against this.
dim3 blocks(threadPerBlock, threadPerBlock, 1);
dim3 grids(blockPerGrid(dimX, blocks.x), blockPerGrid(dimY, blocks.y), blockPerGrid(dimZ, blocks.z));
double *grid_in, *grid_out;
grid_in = (double *)malloc(SIZE * sizeof(double));
grid_out = (double *)malloc(SIZE * sizeof(double));
double *dev_grid_in, *dev_grid_out;
CHECK_ERROR(hipMalloc((void**)&dev_grid_in, SIZE * sizeof(double)));
CHECK_ERROR(hipMalloc((void**)&dev_grid_out, SIZE * sizeof(double)));
init(grid_in, dimX, dimY, dimZ);
init(grid_out, dimX, dimY, dimZ);
// debug(0, "input");
// print(grid_in);
// Time the GPU work (events bracket the transfers and all iterations).
hipEvent_t start, stop;
CHECK_ERROR(hipEventCreate(&start));
CHECK_ERROR(hipEventCreate(&stop));
CHECK_ERROR(hipEventRecord(start, 0));
CHECK_ERROR(hipEventSynchronize(start));
CHECK_ERROR(hipMemcpy(dev_grid_in, grid_in, SIZE * sizeof(double), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpy(dev_grid_out, grid_out, SIZE * sizeof(double), hipMemcpyHostToDevice));
// Ping-pong: after the swap, the freshest data is always in dev_grid_in.
for (int i=0; i<times; ++i) {
hipLaunchKernelGGL(( kernel), dim3(grids), dim3(blocks), 0, 0, dev_grid_in, dev_grid_out);
std::swap(dev_grid_in, dev_grid_out);
}
hipDeviceSynchronize();
CHECK_STATE("kernel call");
CHECK_ERROR(hipMemcpy(grid_in, dev_grid_in, SIZE * sizeof(double), hipMemcpyDeviceToHost));
// Stop timing and compute the elapsed milliseconds.
CHECK_ERROR(hipEventRecord(stop, 0));
CHECK_ERROR(hipEventSynchronize(stop));
float elapsedTime;
CHECK_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
debug(0, "output");
print(grid_in);
std::cout << "Time elapsed: " << std::fixed << std::setprecision(6) << elapsedTime << " ms" << std::endl;
CHECK_ERROR(hipEventDestroy(start));
CHECK_ERROR(hipEventDestroy(stop));
free(grid_in);
free(grid_out);
CHECK_ERROR(hipFree(dev_grid_in));
CHECK_ERROR(hipFree(dev_grid_out));
getchar();
return 0;
}
| 588e7cd1b82cfa782077a819ea7adc675598d24d.cu | #include <iostream>
#include <string>
#include <iomanip>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// #define imin(a, b) (a<b?a:b)
//定义X,Y,Z各维的长度
const int dimX = 10;
const int dimY = 10;
const int dimZ = 10;
const int SIZE = dimX * dimY * dimZ;
//设置每个线程块中线程数量,此处设置三维一样
const int threadPerBlock = 32;
//设置迭代次数
const int times = 90;
//设置stencil边界处邻居的值
__device__ const float BORDER = 0.0;
int count = 0;
//设定线程格中线程块的数量, 避免启动过多线程块
// Number of blocks needed to cover `dim` elements with `threadPerBlock`
// threads per block (ceiling division), without launching excess blocks.
// Idiom fix: replaces divide-then-maybe-increment with the standard form;
// original Chinese comments translated.
int blockPerGrid(const int dim, const int threadPerBlock)
{
    return (dim + threadPerBlock - 1) / threadPerBlock;
}
//错误处理
#define CHECK_ERROR(error) checkCudaError(error, __FILE__, __LINE__)
#define CHECK_STATE(msg) checkCudaState(msg, __FILE__, __LINE__)
inline void checkCudaError(cudaError_t error, const char *file, const int line)
{
if (error != cudaSuccess) {
std::cerr << "CUDA CALL FAILED:" << file << "( " << line << ")- " << cudaGetErrorString(error) << std::endl;
exit(EXIT_FAILURE);
}
}
inline void checkCudaState(const char *msg, const char *file, const int line)
{
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
std::cerr << "---" << msg << " Error---" << std::endl;
std::cerr << file << "( " << line << ")- " << cudaGetErrorString(error) << std::endl;
exit(EXIT_FAILURE);
}
}
//计算线程与元素的唯一偏移,以x为行,y为列,z为高
__device__ __host__ int offset(int x, int y, int z)
{
return (((x + dimX) % dimX) + ((y + dimY) % dimY) * dimX + ((z + dimZ) % dimZ) * dimX * dimY);
}
// 7-point stencil: each cell becomes the average of itself and its six
// face neighbors; neighbors outside the domain contribute BORDER.
// Bug fix: the launch configuration uses 32x32x1 blocks over a 10x10x10
// domain, and offset() wraps indices modulo the dimensions, so unguarded
// out-of-range threads aliased valid cells and raced on their writes.
// (Original Chinese comments translated.)
__global__ void kernel(double *dev_grid_in, double *dev_grid_out)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int z = threadIdx.z + blockIdx.z * blockDim.z;
    if (x >= dimX || y >= dimY || z >= dimZ) return;  // guard excess threads
    // Gather the stencil values; out-of-domain neighbors use BORDER.
    double center = dev_grid_in[offset(x, y, z)];
    double up = (z < (dimZ - 1)) ? dev_grid_in[offset(x, y, z + 1)] : BORDER;
    double down = (z > 0) ? dev_grid_in[offset(x, y, z - 1)] : BORDER;
    double west = (x > 0) ? dev_grid_in[offset(x - 1, y, z)] : BORDER;
    double east = (x < (dimX - 1)) ? dev_grid_in[offset(x + 1, y, z)] : BORDER;
    double south = (y > 0) ? dev_grid_in[offset(x, y - 1, z)] : BORDER;
    double north = (y < (dimY - 1)) ? dev_grid_in[offset(x, y + 1, z)] : BORDER;
    dev_grid_out[offset(x, y, z)] =
        (center + up + down + west + east + south + north) * (1.0 / 7.0);
}
//初始化输入,输出
void init(double *grid, int dimX, int dimY, int dimZ)
{
for (int z=0; z<dimZ; ++z) {
for (int y=0; y<dimY; ++y) {
for (int x=0; x<dimX; ++x) {
if ((x*y*z == 0) || (x == dimX-1) || (y == dimY-1) || (z == dimZ-1)) {
grid[offset(x, y, z)] = 7;
}
else {
grid[offset(x, y, z)] = 0;
// grid[offset(x, y, z)] = count;
}
count++;
}
}
}
}
void print(double *grid)
{
for (int z=0; z<dimZ; ++z) {
std::cout << z << ":\n\n";
for (int y=0; y<dimY; ++y) {
for (int x=0; x<dimX; ++x) {
std::cout << std::fixed << std::setprecision(3) << grid[offset(x, y, z)] << "\t";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
}
void debug(int test, std::string str)
{
if (test != 0) {
std::cout << "-----------" << str << "--------------" << std::endl;
std::cout << test << std::endl;
}
else {
std::cout << "-----------" << str << "--------------" << std::endl;
}
}
int main(void)
{
CHECK_ERROR(cudaSetDevice(0));
//由于blocks不能大于1024,所以最后一维设备为1
dim3 blocks(threadPerBlock, threadPerBlock, 1);
dim3 grids(blockPerGrid(dimX, blocks.x), blockPerGrid(dimY, blocks.y), blockPerGrid(dimZ, blocks.z));
double *grid_in, *grid_out;
grid_in = (double *)malloc(SIZE * sizeof(double));
grid_out = (double *)malloc(SIZE * sizeof(double));
double *dev_grid_in, *dev_grid_out;
CHECK_ERROR(cudaMalloc((void**)&dev_grid_in, SIZE * sizeof(double)));
CHECK_ERROR(cudaMalloc((void**)&dev_grid_out, SIZE * sizeof(double)));
init(grid_in, dimX, dimY, dimZ);
init(grid_out, dimX, dimY, dimZ);
// debug(0, "input");
// print(grid_in);
//统计用于GPU计算的时间
cudaEvent_t start, stop;
CHECK_ERROR(cudaEventCreate(&start));
CHECK_ERROR(cudaEventCreate(&stop));
CHECK_ERROR(cudaEventRecord(start, 0));
CHECK_ERROR(cudaEventSynchronize(start));
CHECK_ERROR(cudaMemcpy(dev_grid_in, grid_in, SIZE * sizeof(double), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(dev_grid_out, grid_out, SIZE * sizeof(double), cudaMemcpyHostToDevice));
for (int i=0; i<times; ++i) {
kernel<<<grids, blocks>>>(dev_grid_in, dev_grid_out);
std::swap(dev_grid_in, dev_grid_out);
}
cudaDeviceSynchronize();
CHECK_STATE("kernel call");
CHECK_ERROR(cudaMemcpy(grid_in, dev_grid_in, SIZE * sizeof(double), cudaMemcpyDeviceToHost));
//计算统计的时间
CHECK_ERROR(cudaEventRecord(stop, 0));
CHECK_ERROR(cudaEventSynchronize(stop));
float elapsedTime;
CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
debug(0, "output");
print(grid_in);
std::cout << "Time elapsed: " << std::fixed << std::setprecision(6) << elapsedTime << " ms" << std::endl;
CHECK_ERROR(cudaEventDestroy(start));
CHECK_ERROR(cudaEventDestroy(stop));
free(grid_in);
free(grid_out);
CHECK_ERROR(cudaFree(dev_grid_in));
CHECK_ERROR(cudaFree(dev_grid_out));
getchar();
return 0;
}
|
aeb317dda8abd5651d9989ab55bff6d697ae8969.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.h"
// sgemm driver: parse matrix dimensions, build random A and B on the host,
// run basicSgemm on the device, and verify the result on the host.
// Fix: the "INSERT CODE HERE" placeholders left A_d/B_d/C_d uninitialized
// while they were passed to basicSgemm, and C_h was verified without ever
// being copied back. The allocation, transfer, and cleanup steps are filled
// in using the file's FATAL error-check convention.
int main (int argc, char *argv[])
{
    Timer timer;
    hipError_t cuda_ret;

    // Initialize host variables ----------------------------------------------
    printf("\nSetting up the problem..."); fflush(stdout);
    startTime(&timer);

    float *A_h, *B_h, *C_h;   // host matrices
    float *A_d, *B_d, *C_d;   // device matrices
    size_t A_sz, B_sz, C_sz;  // element counts
    unsigned matArow, matAcol;
    unsigned matBrow, matBcol;
    dim3 dim_grid, dim_block;

    // A is m x k, B is k x n, C is m x n.
    if (argc == 1) {
        matArow = 1000;
        matAcol = matBrow = 1000;
        matBcol = 1000;
    } else if (argc == 2) {
        matArow = atoi(argv[1]);
        matAcol = matBrow = atoi(argv[1]);
        matBcol = atoi(argv[1]);
    } else if (argc == 4) {
        matArow = atoi(argv[1]);
        matAcol = matBrow = atoi(argv[2]);
        matBcol = atoi(argv[3]);
    } else {
        printf("\n Invalid input parameters!"
        "\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
        "\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
        "\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
        "\n");
        exit(0);
    }

    A_sz = matArow*matAcol;
    B_sz = matBrow*matBcol;
    C_sz = matArow*matBcol;

    A_h = (float*) malloc( sizeof(float)*A_sz );
    for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
    B_h = (float*) malloc( sizeof(float)*B_sz );
    for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
    C_h = (float*) malloc( sizeof(float)*C_sz );

    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
    matBrow, matBcol, matArow, matBcol);

    // Allocate device variables ----------------------------------------------
    printf("Allocating device variables..."); fflush(stdout);
    startTime(&timer);

    cuda_ret = hipMalloc((void**)&A_d, sizeof(float)*A_sz);
    if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
    cuda_ret = hipMalloc((void**)&B_d, sizeof(float)*B_sz);
    if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
    cuda_ret = hipMalloc((void**)&C_d, sizeof(float)*C_sz);
    if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy host variables to device ------------------------------------------
    printf("Copying data from host to device..."); fflush(stdout);
    startTime(&timer);

    cuda_ret = hipMemcpy(A_d, A_h, sizeof(float)*A_sz, hipMemcpyHostToDevice);
    if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device");
    cuda_ret = hipMemcpy(B_d, B_h, sizeof(float)*B_sz, hipMemcpyHostToDevice);
    if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device");

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Launch kernel using standard sgemm interface ---------------------------
    printf("Launching kernel..."); fflush(stdout);
    startTime(&timer);
    basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
    A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);

    cuda_ret = hipDeviceSynchronize();
    if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Copy device variables from host ----------------------------------------
    printf("Copying data from device to host..."); fflush(stdout);
    startTime(&timer);

    cuda_ret = hipMemcpy(C_h, C_d, sizeof(float)*C_sz, hipMemcpyDeviceToHost);
    if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host");

    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));

    // Verify correctness -----------------------------------------------------
    printf("Verifying results..."); fflush(stdout);
    verify(A_h, B_h, C_h, matArow, matAcol, matBcol);

    // Free memory ------------------------------------------------------------
    free(A_h);
    free(B_h);
    free(C_h);
    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);

    return 0;
}
| aeb317dda8abd5651d9989ab55bff6d697ae8969.cu | /******************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.h"
// Driver for the sgemm-tiled lab: C (m x n) = A (m x k) * B (k x n), single
// precision. Parses optional dimensions from argv, fills A and B with random
// values in [0, 0.99], times each phase, launches basicSgemm (kernel.cu),
// and checks the result with verify() from support.h.
// NOTE(review): the device-allocation and host<->device copy sections are
// "//INSERT CODE HERE" placeholders, so A_d/B_d/C_d reach basicSgemm
// uninitialized until a student completes them.
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
// Device pointers — never allocated in this skeleton (see NOTE above).
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
// Dimension selection: no args -> 1000^3; one arg -> cube of m;
// three args -> m, k, n.
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
"\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
"\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
// Host buffers; mallocs are unchecked (lab code).
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
// Synchronize so asynchronous launch errors surface here.
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
return 0;
}
|
2d033071c3a9f8de9008e66e8701f4af79a2c594.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define WARP_SIZE 32 // # of threads that are executed together (constant valid on most hardware)
/* Simple CUDA example showing:
1) how to sum the values of an array in parallel
2) how to add a scaler to values of an array in parallel
3) how to query GPU hardware
Compile with minimum architecture specification of 30. Example:
nvcc example.cu -o example -arch=sm_30
Author: Jordan Bonilla
*/
// Allow timing of functions
clock_t start,end;
/* Add "scalar" to every element of the input array in parallel */
// CPU entry point for kernel to add "scalar" to every element of the input array
// Element-wise in[i] += scalar over an n-element device array. Uses a
// grid-stride loop so any launch geometry (even a single block) covers
// the entire array.
__global__ void _cuda_add_scalar(int *in, int scalar, int n)
{
const int stride = blockDim.x * gridDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
{
in[idx] += scalar;
}
} | 2d033071c3a9f8de9008e66e8701f4af79a2c594.cu | #include "includes.h"
#define WARP_SIZE 32 // # of threads that are executed together (constant valid on most hardware)
/* Simple CUDA example showing:
1) how to sum the values of an array in parallel
2) how to add a scaler to values of an array in parallel
3) how to query GPU hardware
Compile with minimum architecture specification of 30. Example:
nvcc example.cu -o example -arch=sm_30
Author: Jordan Bonilla
*/
// Allow timing of functions
clock_t start,end;
/* Add "scalar" to every element of the input array in parallel */
// CPU entry point for kernel to add "scalar" to every element of the input array
// Adds `scalar` to every element of the n-element device array `in`,
// walking the array with a grid-stride loop so correctness does not
// depend on the launch configuration.
__global__ void _cuda_add_scalar(int *in, int scalar, int n)
{
const int stride = blockDim.x * gridDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
{
in[idx] += scalar;
}
} |
846d6d148c8d59739528fbfe55349eef747cdbbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rbbox_overlaps_kernel.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Signed area of triangle (a, b, c): half the 2-D cross product of the
// edge vectors (a - c) and (b - c). Sign encodes winding order.
__device__ inline float trangle_area(float * a, float * b, float * c) {
float ux = a[0] - c[0], uy = a[1] - c[1];
float vx = b[0] - c[0], vy = b[1] - c[1];
return (ux * vy - uy * vx) / 2.0;
}
// Area of a polygon given as num_of_inter (x, y) pairs in int_pts, summed
// as a triangle fan anchored at vertex 0. Returns 0 for fewer than three
// vertices (the loop body never runs).
__device__ inline float area(float * int_pts, int num_of_inter) {
float acc = 0.0;
for (int t = 1; t + 1 < num_of_inter; t++) {
acc += fabs(trangle_area(int_pts, int_pts + 2 * t, int_pts + 2 * t + 2));
}
return acc;
}
// Sorts the num_of_inter (x, y) pairs in int_pts into counter-clockwise
// order around their centroid, so `area` can treat them as a convex
// polygon. Each point is keyed by a monotone pseudo-angle (normalized
// direction x, reflected to -2 - x for points below the centroid) and the
// pairs are rearranged with an insertion sort on that key.
__device__ inline void reorder_pts(float * int_pts, int num_of_inter) {
if(num_of_inter > 0) {
// Centroid of all intersection points.
float center[2];
center[0] = 0.0;
center[1] = 0.0;
for(int i = 0;i < num_of_inter;i++) {
center[0] += int_pts[2 * i];
center[1] += int_pts[2 * i + 1];
}
center[0] /= num_of_inter;
center[1] /= num_of_inter;
// vs[i] = pseudo-angle key of point i (at most 16 points possible:
// 8 contained corners + 8 edge intersections, bounded by inter_pts).
float vs[16];
float v[2];
float d;
for(int i = 0;i < num_of_inter;i++) {
v[0] = int_pts[2 * i]-center[0];
v[1] = int_pts[2 * i + 1]-center[1];
d = sqrt(v[0] * v[0] + v[1] * v[1]);
v[0] = v[0] / d;
v[1] = v[1] / d;
// Lower half-plane: map cos to -2 - cos so the key decreases
// monotonically over a full turn.
if(v[1] < 0) {
v[0]= - 2 - v[0];
}
vs[i] = v[0];
}
// Insertion sort on vs, moving the (x, y) pairs alongside the keys.
float temp,tx,ty;
int j;
for(int i=1;i<num_of_inter;++i){
if(vs[i-1]>vs[i]){
temp = vs[i];
tx = int_pts[2*i];
ty = int_pts[2*i+1];
j=i;
while(j>0&&vs[j-1]>temp){
vs[j] = vs[j-1];
int_pts[j*2] = int_pts[j*2-2];
int_pts[j*2+1] = int_pts[j*2-1];
j--;
}
vs[j] = temp;
int_pts[j*2] = tx;
int_pts[j*2+1] = ty;
}
}
}
}
// Tests whether edge i of quad pts1 properly crosses edge j of quad pts2
// (each quad stored as 4 (x, y) pairs; edges wrap via % 4). On a proper
// crossing, writes the intersection point to temp_pts and returns true.
// The >= -1e-5 sign tests reject touching/collinear cases as
// non-intersecting, leaving those points to the `inrect` containment test.
__device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) {
float a[2];
float b[2];
float c[2];
float d[2];
float area_abc, area_abd, area_cda, area_cdb;
a[0] = pts1[2 * i];
a[1] = pts1[2 * i + 1];
b[0] = pts1[2 * ((i + 1) % 4)];
b[1] = pts1[2 * ((i + 1) % 4) + 1];
c[0] = pts2[2 * j];
c[1] = pts2[2 * j + 1];
d[0] = pts2[2 * ((j + 1) % 4)];
d[1] = pts2[2 * ((j + 1) % 4) + 1];
// c and d must lie on strictly opposite sides of line ab ...
area_abc = trangle_area(a, b, c);
area_abd = trangle_area(a, b, d);
if(area_abc * area_abd >= -1e-5) {
return false;
}
// ... and a and b on opposite sides of line cd.
area_cda = trangle_area(c, d, a);
area_cdb = area_cda + area_abc - area_abd;
if (area_cda * area_cdb >= -1e-5) {
return false;
}
// Intersection as a parametric point along segment ab.
float t = area_cda / (area_abd - area_abc);
float dx = t * (b[0] - a[0]);
float dy = t * (b[1] - a[1]);
temp_pts[0] = a[0] + dx;
temp_pts[1] = a[1] + dy;
return true;
}
// Point-in-rectangle test via projections onto the rectangle's edge
// vectors AB and AD (corners stored as 4 (x, y) pairs in pts, A = pts[0..1],
// B = pts[2..3], D = pts[6..7]). Inside iff 0 <= AP.AB <= AB.AB and
// 0 <= AP.AD <= AD.AD; each bound is relaxed by 1 to tolerate
// floating-point error on shared edges.
__device__ inline bool inrect(float pt_x, float pt_y, float * pts) {
double abx = pts[2] - pts[0];
double aby = pts[3] - pts[1];
double adx = pts[6] - pts[0];
double ady = pts[7] - pts[1];
double apx = pt_x - pts[0];
double apy = pt_y - pts[1];
double abab = abx * abx + aby * aby;
double abap = abx * apx + aby * apy;
double adad = adx * adx + ady * ady;
double adap = adx * apx + ady * apy;
return (abab - abap >= -1) && (abap >= -1) && (adad - adap >= -1) && (adap >= -1);
}
// Collects the vertices of the intersection polygon of two quads
// (pts1, pts2, each 4 (x, y) pairs) into int_pts and returns their count:
// corners of either quad contained in the other, plus all pairwise edge
// crossings. At most 16 points are produced (8 corners + 8 crossings),
// matching the caller's int_pts[16] buffers. Order is arbitrary;
// reorder_pts sorts them afterwards.
__device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) {
int num_of_inter = 0;
// Contained corners of each quad.
for(int i = 0;i < 4;i++) {
if(inrect(pts1[2 * i], pts1[2 * i + 1], pts2)) {
int_pts[num_of_inter * 2] = pts1[2 * i];
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1];
num_of_inter++;
}
if(inrect(pts2[2 * i], pts2[2 * i + 1], pts1)) {
int_pts[num_of_inter * 2] = pts2[2 * i];
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1];
num_of_inter++;
}
}
// Edge-edge crossing points.
float temp_pts[2];
for(int i = 0;i < 4;i++) {
for(int j = 0;j < 4;j++) {
bool has_pts = inter2line(pts1, pts2, i, j, temp_pts);
if(has_pts) {
int_pts[num_of_inter * 2] = temp_pts[0];
int_pts[num_of_inter * 2 + 1] = temp_pts[1];
num_of_inter++;
}
}
}
return num_of_inter;
}
// Expands a rotated box region = (cx, cy, w, h, angle-in-degrees) into its
// four corner (x, y) pairs in pts. Corners are generated around the origin,
// rotated by `angle`, translated to the center, and written in reversed
// order: local corner i lands at pair (3 - i) of pts.
__device__ inline void convert_region(float * pts , float const * const region) {
float angle = region[4];
// Degrees -> radians.
float a_cos = cos(angle/180.0*3.1415926535);
float a_sin = sin(angle/180.0*3.1415926535);
float ctr_x = region[0];
float ctr_y = region[1];
float w = region[2];
float h = region[3];
// Axis-aligned corners about the origin, counter-clockwise.
float pts_x[4];
float pts_y[4];
pts_x[0] = - w / 2;
pts_x[1] = w / 2;
pts_x[2] = w / 2;
pts_x[3] = - w / 2;
pts_y[0] = - h / 2;
pts_y[1] = - h / 2;
pts_y[2] = h / 2;
pts_y[3] = h / 2;
// Rotate + translate; note the reversed destination index.
for(int i = 0;i < 4;i++) {
pts[7 - 2 * i - 1] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x;
pts[7 - 2 * i] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y;
}
}
// Intersection area of two rotated boxes (cx, cy, w, h, angle_deg):
// convert both to corner form, gather the intersection-polygon vertices,
// sort them into convex order, then integrate the polygon area.
__device__ inline float inter(float const * const region1, float const * const region2) {
float corners_a[8];
float corners_b[8];
float poly[16];
convert_region(corners_a, region1);
convert_region(corners_b, region2);
int n_vertices = inter_pts(corners_a, corners_b, poly);
reorder_pts(poly, n_vertices);
return area(poly, n_vertices);
}
// IoU of two rotated boxes. Boxes whose five fields all match within 1e-5
// short-circuit to 1.0, avoiding a degenerate clip of a box against itself;
// otherwise IoU = inter / (area1 + area2 - inter), clamped below at 0.
__device__ inline float devRotateIoU(float const * const region1, float const * const region2) {
bool same = true;
for (int f = 0; f < 5; f++) {
if (!(fabs(region1[f] - region2[f]) < 1e-5)) {
same = false;
break;
}
}
if (same) {
return 1.0;
}
float a1 = region1[2] * region1[3];
float a2 = region2[2] * region2[3];
float ai = inter(region1, region2);
float iou = ai / (a1 + a2 - ai);
return iou < 0 ? 0.0f : iou;
}
// Fills the N x K rotated-IoU matrix dev_overlaps (row-major,
// dev_overlaps[b * K + q] = IoU(box b, query box q)). Each block handles a
// threadsPerBlock x threadsPerBlock tile: threads first stage the tile's
// boxes (5 floats each) into shared memory, then thread t computes row t of
// the tile against every staged query box. Expects a 1-D block of
// threadsPerBlock threads and a 2-D grid of ceil(N/tile) x ceil(K/tile).
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
const float * dev_query_boxes, float* dev_overlaps) {
const int col_start = blockIdx.y;
const int row_start = blockIdx.x;
// Clip the tile at the matrix edges.
const int row_size =
min(N - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(K - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
__shared__ float block_query_boxes[threadsPerBlock * 5];
// Stage this tile's query boxes into shared memory (5 floats per box).
if (threadIdx.x < col_size) {
block_query_boxes[threadIdx.x * 5 + 0] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_query_boxes[threadIdx.x * 5 + 1] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_query_boxes[threadIdx.x * 5 + 2] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_query_boxes[threadIdx.x * 5 + 3] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_query_boxes[threadIdx.x * 5 + 4] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
// Stage this tile's boxes.
if (threadIdx.x < row_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 4];
}
// All staging must finish before any thread reads the shared tiles.
__syncthreads();
if (threadIdx.x < row_size) {
for(int i = 0;i < col_size; i++) {
int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ;
dev_overlaps[offset] = devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5);
}
}
}
// Makes `device_id` the current HIP device for this thread, skipping the
// call when it is already current.
// Fix: the hipGetDevice argument had been corrupted into the HTML entity
// "¤t_device" (a mangled "&current_device"), which does not compile.
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(hipGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
// Computes the full n x k rotated-box IoU matrix on device `device_id`.
// boxes: n rows of (cx, cy, w, h, angle_deg); query_boxes: k rows of the
// same layout; overlaps: caller-allocated n*k floats, written row-major
// (overlaps[i * k + j] = IoU(boxes[i], query_boxes[j])).
// Cleanup vs. original: removed the dead "if (true){}" statement and added
// a post-launch error check so bad launch configurations are reported.
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {
_set_device(device_id);
float* overlaps_dev = NULL;
float* boxes_dev = NULL;
float* query_boxes_dev = NULL;
// Stage both box lists on the device.
CUDA_CHECK(hipMalloc(&boxes_dev,
n * 5 * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes,
n * 5 * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&query_boxes_dev,
k * 5 * sizeof(float)));
CUDA_CHECK(hipMemcpy(query_boxes_dev,
query_boxes,
k * 5 * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&overlaps_dev,
n * k * sizeof(float)));
// One block per (row tile, column tile) of the IoU matrix.
dim3 blocks(DIVUP(n, threadsPerBlock),
DIVUP(k, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( overlaps_kernel), dim3(blocks), dim3(threads), 0, 0, n, k,
boxes_dev,
query_boxes_dev,
overlaps_dev);
// Surface launch-configuration errors; the blocking memcpy below
// surfaces asynchronous execution errors.
CUDA_CHECK(hipPeekAtLastError());
CUDA_CHECK(hipMemcpy(overlaps,
overlaps_dev,
n * k * sizeof(float),
hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(overlaps_dev));
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(query_boxes_dev));
}
| 846d6d148c8d59739528fbfe55349eef747cdbbe.cu |
#include "rbbox_overlaps_kernel.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Signed area of triangle (a, b, c): half the 2-D cross product of the
// edge vectors (a - c) and (b - c). Sign encodes winding order.
__device__ inline float trangle_area(float * a, float * b, float * c) {
float ux = a[0] - c[0], uy = a[1] - c[1];
float vx = b[0] - c[0], vy = b[1] - c[1];
return (ux * vy - uy * vx) / 2.0;
}
// Area of a polygon given as num_of_inter (x, y) pairs in int_pts, summed
// as a triangle fan anchored at vertex 0. Returns 0 for fewer than three
// vertices (the loop body never runs).
__device__ inline float area(float * int_pts, int num_of_inter) {
float acc = 0.0;
for (int t = 1; t + 1 < num_of_inter; t++) {
acc += fabs(trangle_area(int_pts, int_pts + 2 * t, int_pts + 2 * t + 2));
}
return acc;
}
// Sorts the num_of_inter (x, y) pairs in int_pts into counter-clockwise
// order around their centroid, so `area` can treat them as a convex
// polygon. Each point is keyed by a monotone pseudo-angle (normalized
// direction x, reflected to -2 - x for points below the centroid) and the
// pairs are rearranged with an insertion sort on that key.
__device__ inline void reorder_pts(float * int_pts, int num_of_inter) {
if(num_of_inter > 0) {
// Centroid of all intersection points.
float center[2];
center[0] = 0.0;
center[1] = 0.0;
for(int i = 0;i < num_of_inter;i++) {
center[0] += int_pts[2 * i];
center[1] += int_pts[2 * i + 1];
}
center[0] /= num_of_inter;
center[1] /= num_of_inter;
// vs[i] = pseudo-angle key of point i (at most 16 points possible:
// 8 contained corners + 8 edge intersections, bounded by inter_pts).
float vs[16];
float v[2];
float d;
for(int i = 0;i < num_of_inter;i++) {
v[0] = int_pts[2 * i]-center[0];
v[1] = int_pts[2 * i + 1]-center[1];
d = sqrt(v[0] * v[0] + v[1] * v[1]);
v[0] = v[0] / d;
v[1] = v[1] / d;
// Lower half-plane: map cos to -2 - cos so the key decreases
// monotonically over a full turn.
if(v[1] < 0) {
v[0]= - 2 - v[0];
}
vs[i] = v[0];
}
// Insertion sort on vs, moving the (x, y) pairs alongside the keys.
float temp,tx,ty;
int j;
for(int i=1;i<num_of_inter;++i){
if(vs[i-1]>vs[i]){
temp = vs[i];
tx = int_pts[2*i];
ty = int_pts[2*i+1];
j=i;
while(j>0&&vs[j-1]>temp){
vs[j] = vs[j-1];
int_pts[j*2] = int_pts[j*2-2];
int_pts[j*2+1] = int_pts[j*2-1];
j--;
}
vs[j] = temp;
int_pts[j*2] = tx;
int_pts[j*2+1] = ty;
}
}
}
}
// Tests whether edge i of quad pts1 properly crosses edge j of quad pts2
// (each quad stored as 4 (x, y) pairs; edges wrap via % 4). On a proper
// crossing, writes the intersection point to temp_pts and returns true.
// The >= -1e-5 sign tests reject touching/collinear cases as
// non-intersecting, leaving those points to the `inrect` containment test.
__device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) {
float a[2];
float b[2];
float c[2];
float d[2];
float area_abc, area_abd, area_cda, area_cdb;
a[0] = pts1[2 * i];
a[1] = pts1[2 * i + 1];
b[0] = pts1[2 * ((i + 1) % 4)];
b[1] = pts1[2 * ((i + 1) % 4) + 1];
c[0] = pts2[2 * j];
c[1] = pts2[2 * j + 1];
d[0] = pts2[2 * ((j + 1) % 4)];
d[1] = pts2[2 * ((j + 1) % 4) + 1];
// c and d must lie on strictly opposite sides of line ab ...
area_abc = trangle_area(a, b, c);
area_abd = trangle_area(a, b, d);
if(area_abc * area_abd >= -1e-5) {
return false;
}
// ... and a and b on opposite sides of line cd.
area_cda = trangle_area(c, d, a);
area_cdb = area_cda + area_abc - area_abd;
if (area_cda * area_cdb >= -1e-5) {
return false;
}
// Intersection as a parametric point along segment ab.
float t = area_cda / (area_abd - area_abc);
float dx = t * (b[0] - a[0]);
float dy = t * (b[1] - a[1]);
temp_pts[0] = a[0] + dx;
temp_pts[1] = a[1] + dy;
return true;
}
// Point-in-rectangle test via projections onto the rectangle's edge
// vectors AB and AD (corners stored as 4 (x, y) pairs in pts, A = pts[0..1],
// B = pts[2..3], D = pts[6..7]). Inside iff 0 <= AP.AB <= AB.AB and
// 0 <= AP.AD <= AD.AD; each bound is relaxed by 1 to tolerate
// floating-point error on shared edges.
__device__ inline bool inrect(float pt_x, float pt_y, float * pts) {
double abx = pts[2] - pts[0];
double aby = pts[3] - pts[1];
double adx = pts[6] - pts[0];
double ady = pts[7] - pts[1];
double apx = pt_x - pts[0];
double apy = pt_y - pts[1];
double abab = abx * abx + aby * aby;
double abap = abx * apx + aby * apy;
double adad = adx * adx + ady * ady;
double adap = adx * apx + ady * apy;
return (abab - abap >= -1) && (abap >= -1) && (adad - adap >= -1) && (adap >= -1);
}
// Collects the vertices of the intersection polygon of two quads
// (pts1, pts2, each 4 (x, y) pairs) into int_pts and returns their count:
// corners of either quad contained in the other, plus all pairwise edge
// crossings. At most 16 points are produced (8 corners + 8 crossings),
// matching the caller's int_pts[16] buffers. Order is arbitrary;
// reorder_pts sorts them afterwards.
__device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) {
int num_of_inter = 0;
// Contained corners of each quad.
for(int i = 0;i < 4;i++) {
if(inrect(pts1[2 * i], pts1[2 * i + 1], pts2)) {
int_pts[num_of_inter * 2] = pts1[2 * i];
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1];
num_of_inter++;
}
if(inrect(pts2[2 * i], pts2[2 * i + 1], pts1)) {
int_pts[num_of_inter * 2] = pts2[2 * i];
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1];
num_of_inter++;
}
}
// Edge-edge crossing points.
float temp_pts[2];
for(int i = 0;i < 4;i++) {
for(int j = 0;j < 4;j++) {
bool has_pts = inter2line(pts1, pts2, i, j, temp_pts);
if(has_pts) {
int_pts[num_of_inter * 2] = temp_pts[0];
int_pts[num_of_inter * 2 + 1] = temp_pts[1];
num_of_inter++;
}
}
}
return num_of_inter;
}
// Expands a rotated box region = (cx, cy, w, h, angle-in-degrees) into its
// four corner (x, y) pairs in pts. Corners are generated around the origin,
// rotated by `angle`, translated to the center, and written in reversed
// order: local corner i lands at pair (3 - i) of pts.
__device__ inline void convert_region(float * pts , float const * const region) {
float angle = region[4];
// Degrees -> radians.
float a_cos = cos(angle/180.0*3.1415926535);
float a_sin = sin(angle/180.0*3.1415926535);
float ctr_x = region[0];
float ctr_y = region[1];
float w = region[2];
float h = region[3];
// Axis-aligned corners about the origin, counter-clockwise.
float pts_x[4];
float pts_y[4];
pts_x[0] = - w / 2;
pts_x[1] = w / 2;
pts_x[2] = w / 2;
pts_x[3] = - w / 2;
pts_y[0] = - h / 2;
pts_y[1] = - h / 2;
pts_y[2] = h / 2;
pts_y[3] = h / 2;
// Rotate + translate; note the reversed destination index.
for(int i = 0;i < 4;i++) {
pts[7 - 2 * i - 1] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x;
pts[7 - 2 * i] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y;
}
}
// Intersection area of two rotated boxes (cx, cy, w, h, angle_deg):
// convert both to corner form, gather the intersection-polygon vertices,
// sort them into convex order, then integrate the polygon area.
__device__ inline float inter(float const * const region1, float const * const region2) {
float corners_a[8];
float corners_b[8];
float poly[16];
convert_region(corners_a, region1);
convert_region(corners_b, region2);
int n_vertices = inter_pts(corners_a, corners_b, poly);
reorder_pts(poly, n_vertices);
return area(poly, n_vertices);
}
// IoU of two rotated boxes. Boxes whose five fields all match within 1e-5
// short-circuit to 1.0, avoiding a degenerate clip of a box against itself;
// otherwise IoU = inter / (area1 + area2 - inter), clamped below at 0.
__device__ inline float devRotateIoU(float const * const region1, float const * const region2) {
bool same = true;
for (int f = 0; f < 5; f++) {
if (!(fabs(region1[f] - region2[f]) < 1e-5)) {
same = false;
break;
}
}
if (same) {
return 1.0;
}
float a1 = region1[2] * region1[3];
float a2 = region2[2] * region2[3];
float ai = inter(region1, region2);
float iou = ai / (a1 + a2 - ai);
return iou < 0 ? 0.0f : iou;
}
// Fills the N x K rotated-IoU matrix dev_overlaps (row-major,
// dev_overlaps[b * K + q] = IoU(box b, query box q)). Each block handles a
// threadsPerBlock x threadsPerBlock tile: threads first stage the tile's
// boxes (5 floats each) into shared memory, then thread t computes row t of
// the tile against every staged query box. Expects a 1-D block of
// threadsPerBlock threads and a 2-D grid of ceil(N/tile) x ceil(K/tile).
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
const float * dev_query_boxes, float* dev_overlaps) {
const int col_start = blockIdx.y;
const int row_start = blockIdx.x;
// Clip the tile at the matrix edges.
const int row_size =
min(N - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(K - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
__shared__ float block_query_boxes[threadsPerBlock * 5];
// Stage this tile's query boxes into shared memory (5 floats per box).
if (threadIdx.x < col_size) {
block_query_boxes[threadIdx.x * 5 + 0] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_query_boxes[threadIdx.x * 5 + 1] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_query_boxes[threadIdx.x * 5 + 2] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_query_boxes[threadIdx.x * 5 + 3] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_query_boxes[threadIdx.x * 5 + 4] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
// Stage this tile's boxes.
if (threadIdx.x < row_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 4];
}
// All staging must finish before any thread reads the shared tiles.
__syncthreads();
if (threadIdx.x < row_size) {
for(int i = 0;i < col_size; i++) {
int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ;
dev_overlaps[offset] = devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5);
}
}
}
// Makes `device_id` the current CUDA device for this thread, skipping the
// call when it is already current.
// Fix: the cudaGetDevice argument had been corrupted into the HTML entity
// "¤t_device" (a mangled "&current_device"), which does not compile.
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(cudaGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
// Computes the full n x k rotated-box IoU matrix on device `device_id`.
// boxes: n rows of (cx, cy, w, h, angle_deg); query_boxes: k rows of the
// same layout; overlaps: caller-allocated n*k floats, written row-major
// (overlaps[i * k + j] = IoU(boxes[i], query_boxes[j])).
// Cleanup vs. original: removed the dead "if (true){}" statement and added
// a post-launch error check so bad launch configurations are reported.
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {
_set_device(device_id);
float* overlaps_dev = NULL;
float* boxes_dev = NULL;
float* query_boxes_dev = NULL;
// Stage both box lists on the device.
CUDA_CHECK(cudaMalloc(&boxes_dev,
n * 5 * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes,
n * 5 * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&query_boxes_dev,
k * 5 * sizeof(float)));
CUDA_CHECK(cudaMemcpy(query_boxes_dev,
query_boxes,
k * 5 * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&overlaps_dev,
n * k * sizeof(float)));
// One block per (row tile, column tile) of the IoU matrix.
dim3 blocks(DIVUP(n, threadsPerBlock),
DIVUP(k, threadsPerBlock));
dim3 threads(threadsPerBlock);
overlaps_kernel<<<blocks, threads>>>(n, k,
boxes_dev,
query_boxes_dev,
overlaps_dev);
// Surface launch-configuration errors; the blocking memcpy below
// surfaces asynchronous execution errors.
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaMemcpy(overlaps,
overlaps_dev,
n * k * sizeof(float),
cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaFree(overlaps_dev));
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(query_boxes_dev));
}
|
2dbb33d1d0b38afa41d1b02787141a81a7f0c644.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by yanhao on 17-11-21.
//
#include <stdio.h>
#include <stdlib.h>
//#include "utils.h"
#include <iostream>
//#include "helper_cuda.h"
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include "mul_cublas.h"
// Helper function for using CUDA to add vectors in parallel.
//hipblasStatus_t
//addWithCuda(const hipblasHandle_t &handle, float *c, const float *a, const float *b, unsigned int WA, unsigned int HA,
// unsigned int WB,
// unsigned int HB) {
//
// float *dev_a = 0;
// float *dev_b = 0;
// float *dev_c = 0;
// hipError_t cudaStatus;
// hipblasStatus_t cublasStatus;
//
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = hipMalloc((void **) &dev_c, HA * WB * sizeof(float));
// if (cudaStatus != hipSuccess) {
// printf( "hipMalloc failed!");
// // Error;
// }
//
// cudaStatus = hipMalloc((void **) &dev_a, HA * WA * sizeof(float));
// if (cudaStatus != hipSuccess) {
// printf( "hipMalloc failed!");
// //goto Error;
// }
//
// cudaStatus = hipMalloc((void **) &dev_b, HB * WB * sizeof(float));
// if (cudaStatus != hipSuccess) {
// printf( "hipMalloc failed!");
// //goto Error;
// }
//
// hipblasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
// hipblasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
// //
// hipDeviceSynchronize();
//
// float alpha = 1.0;
// float beta = 0.0;
// cublasStatus = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a, HA, &beta, dev_c,
// HA);
//
// hipDeviceSynchronize();
// hipblasGetVector(HA * WB, sizeof(float), c, 1, dev_c, 1);
// //Error:
// hipFree(dev_c);
// hipFree(dev_a);
// hipFree(dev_b);
// return cublasStatus;
//}
//hipblasStatus_t
//addWithCuda(const hipblasHandle_t &handle, float *c, const float *a, const float *b, unsigned int WA, unsigned int HA,
// unsigned int WB,
// unsigned int HB) {
//
// float *dev_a = 0;
// float *dev_b = 0;
// float *dev_c = 0;
// hipError_t cudaStatus;
// hipblasStatus_t cublasStatus;
//
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = hipMalloc((void **) &dev_c, HA * WB * sizeof(float));
// if (cudaStatus != hipSuccess) {
// printf( "hipMalloc failed!");
// // Error;
// }
//
// cudaStatus = hipMalloc((void **) &dev_a, HA * WA * sizeof(float));
// if (cudaStatus != hipSuccess) {
// printf( "hipMalloc failed!");
// //goto Error;
// }
//
// cudaStatus = hipMalloc((void **) &dev_b, HB * WB * sizeof(float));
// if (cudaStatus != hipSuccess) {
// printf( "hipMalloc failed!");
// //goto Error;
// }
//
// hipblasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
// hipblasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
// //
// hipDeviceSynchronize();
//
// float alpha = 1.0;
// float beta = 0.0;
// clock_t start = clock();
//
// cublasStatus = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a, HA, &beta, dev_c,
// HA);
//
//
// clock_t time_used = clock() - start;
// printf("(GPU31) time:%ld\n", time_used);
// hipDeviceSynchronize();
// hipblasGetVector(HA * WB, sizeof(float), c, 1, dev_c, 1);
// for (int i = 0; i < 2; ++i) {
// for (int j = 0; j < 2; ++j) {
// printf("%f\n", c[i * 2 + j]);
// }
// }
// //Error:
// hipFree(dev_c);
// hipFree(dev_a);
// hipFree(dev_b);
// return cublasStatus;
//}
//addWithCuda2(const hipblasHandle_t &handle, float *dev_c, const float *dev_a, const float *dev_b, unsigned int WA,
// unsigned int HA, unsigned int WB,
// unsigned int HB) {
//
// float alpha = 1.0;
// float beta = 0.0;
// hipblasStatus_t cublasStatus = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a,
// HA, &beta, dev_c, HA);
//
//
//}
/**
const int WA = 3;
const int HA = 2;
const int WB = 3;
const int HB = 4;
float A[WA * HA] = {1, 2, 3,
4, 5, 6};
float B[WB * HB] = {1, 3, 9,
2, 1, 1,
3, 2, 5,
0, 2, 8
};
*/
// Multiplies host matrices a (HA x WA, row-major) and b (HB x WB, row-major)
// with hipBLAS, writing the HC x WC = HA x HB result to host buffer c.
// NOTE(review): hipblasSgemm is column-major; from the OP_T/OP_N flags and
// lda/ldb/ldc chosen below this appears to compute, in row-major terms,
// C = A * B^T (requires WA == WB) — verify against the example dims in the
// comment above before relying on it.
hipblasStatus_t
addWithCuda5(const hipblasHandle_t &handle, float *c, const float *a, const float *b, unsigned int WA, unsigned int HA,
unsigned int WB,
unsigned int HB) {
float *dev_a = 0;
float *dev_b = 0;
float *dev_c = 0;
// Leftover debug trace.
printf("aaaaaaaaaaa!\n");
hipError_t cudaStatus;
hipblasStatus_t cublasStatus;
// Result dimensions: one row per row of A, one column per row of B.
const int WC = HB;
const int HC = HA;
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void **) &dev_c, WC * HC * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
// Error;
}
cudaStatus = hipMalloc((void **) &dev_a, HA * WA * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
//goto Error;
}
cudaStatus = hipMalloc((void **) &dev_b, HB * WB * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
//goto Error;
}
// Upload inputs (return codes unchecked).
hipblasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
hipblasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
//
hipDeviceSynchronize();
float alpha = 1.0;
float beta = 0.0;
//printf("aaaaaaaaaaa!\n");
int m = HB;
int n = HA;
int k = WB;
int lda = WB;
int ldb = WA;
int ldc = WC;
printf("%d,%d,%d,%d,%d,%d\n", m, n, k, lda, ldb, ldc);
// NOTE(review): clock() here times only the (asynchronous) GEMM enqueue,
// not the GPU execution.
clock_t start = clock();
cublasStatus = hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha, dev_b, ldb, dev_a, lda, &beta, dev_c,
ldc);
clock_t time_used = clock() - start;
printf("(GPU31) time:%ld\n", time_used);
hipDeviceSynchronize();
//printf("aaaaaaaaaaa!\n");
if (cublasStatus != HIPBLAS_STATUS_SUCCESS) {
printf("CUBLASdddddd\n");
if (cublasStatus == HIPBLAS_STATUS_NOT_INITIALIZED) {
printf("CUBLAS \n");
}
//return;
}
hipDeviceSynchronize();
hipDeviceSynchronize();
// Download the result into the caller's buffer.
hipblasGetVector(HC * WC, sizeof(float), dev_c, 1, c, 1);
//Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cublasStatus;
}
// Allocates `len` elements of T on the current device into `ptr`.
// Fix: the hipMalloc return value was silently discarded; on failure the
// error is now reported and `ptr` is set to nullptr so callers can detect
// the failed allocation.
template<typename T>
void gpu_memory_alloc(size_t len, T *&ptr) {
    hipError_t err = hipMalloc(&ptr, sizeof(T) * len);
    if (err != hipSuccess) {
        ptr = nullptr;
        fprintf(stderr, "gpu_memory_alloc: hipMalloc of %zu bytes failed: %s\n",
                sizeof(T) * len, hipGetErrorString(err));
    }
}
//#define CHECK_EQ(val1, val2) ((val1)==(val2))
#define CHECK_NE(val1, val2) CHECK_OP(_NE, !=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(_LE, <=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(_LT, < , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(_GE, >=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(_GT, > , val1, val2)
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
std::cout<< " log:" << hipGetErrorString(error)<<std::endl; \
} while (0)
#define CUBLAS_CHECK(condition) \
do { \
hipblasStatus_t status = condition; \
CHECK_EQ(status, HIPBLAS_STATUS_SUCCESS) << " " \
<< caffe::cublasGetErrorString(status); \
} while (0)
#define CURAND_CHECK(condition) \
do { \
hiprandStatus_t status = condition; \
CHECK_EQ(status, HIPRAND_STATUS_SUCCESS) << " " \
<< caffe::curandGetErrorString(status); \
} while (0)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// CUDA: check for error after kernel execution and exit loudly if there is one.
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(hipPeekAtLastError())
// CUDA: threads per block (64 here; the comment previously said 512)
const int CAFFE_CUDA_NUM_THREADS = 64;
// CUDA: number of blocks needed to cover N work items — ceiling division
// of N by the threads-per-block constant.
inline int CAFFE_GET_BLOCKS(const int N) {
const int threads = CAFFE_CUDA_NUM_THREADS;
return (N + threads - 1) / threads;
}
// Caffe-style im2col kernel: each of the n = channels * height_col *
// width_col loop indices corresponds to one (channel, output row, output
// column) position. For that position the kernel_h x kernel_w input patch
// (with stride, padding, and dilation applied) is copied down one column of
// data_col, writing 0 for taps that fall outside the padded image.
template<typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype *data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype *data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Decode index -> (channel c_im, output row h_col, output col w_col).
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// Top-left input coordinate of this patch (may be negative due to pad).
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype *data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype *data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
// Zero-pad taps outside the image.
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
// Successive kernel taps are height_col*width_col apart in data_col.
data_col_ptr += height_col * width_col;
}
}
}
}
// Expands a device image tensor (channels x height x width) into column form
// ("im2col") for GEMM-based convolution.  data_col must hold
// channels * kernel_h * kernel_w * height_col * width_col elements.
// Launches one thread per (channel, out_y, out_x) position and checks the
// launch via CUDA_POST_KERNEL_CHECK.  (Removed leftover debug printf.)
template<typename Dtype>
void im2col_gpu(const Dtype *data_im, const int channels,
                const int height, const int width, const int kernel_h, const int kernel_w,
                const int pad_h, const int pad_w,
                const int stride_h, const int stride_w,
                const int dilation_h, const int dilation_w,
                Dtype *data_col) {
  // Output spatial dims after padding, dilation and stride.
  int height_col = (height + 2 * pad_h -
      (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w -
      (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
  // One thread per element of channels * height_col * width_col.
  int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Host-side wrapper around im2col_gpu for float data resident on the host.
// Allocates device buffers, copies the image in, runs im2col, and copies the
// column buffer back.  data_col must be caller-allocated with
// channels * kszie * kszie * h_out * w_out floats.
// *h_out / *w_out receive the output spatial dims (square kernel, dilation 1).
// Fixes vs. original: bails out (and frees partial allocations) on
// hipMalloc failure instead of continuing with null pointers; removed
// leftover debug printf.
void im2col2(const float *data_im, const int channels, int height, int width, const int kszie,
             const int pad, const int stride,
             float *data_col, int *h_out, int *w_out) {
    hipError_t cudaStatus;
    float *dev_a = 0;
    float *dev_b = 0;
    // Output dims for a square kernel with dilation 1.
    int height_col = (height + 2 * pad -
                      ((kszie - 1) + 1)) / stride + 1;
    int width_col = (width + 2 * pad -
                     ((kszie - 1) + 1)) / stride + 1;
    *h_out = height_col;
    *w_out = width_col;
    cudaStatus = hipMalloc((void **) &dev_a, channels * height * width * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        return;  // nothing allocated yet, safe to bail
    }
    cudaStatus = hipMalloc((void **) &dev_b, channels * kszie * kszie * height_col * width_col * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        hipFree(dev_a);  // do not leak the first allocation
        return;
    }
    hipMemcpy(dev_a, data_im, sizeof(float) * channels * height * width, hipMemcpyHostToDevice);
    im2col_gpu<float>(dev_a, channels, height, width, kszie, kszie, pad, pad, stride, stride, 1, 1, dev_b);
    hipMemcpy(data_col, dev_b, channels * kszie * kszie * height_col * width_col * sizeof(float),
              hipMemcpyDeviceToHost);
    hipFree(dev_a);
    hipFree(dev_b);
}
// Demo GEMM: c = a * b for row-major host matrices a (3x2) and b (2x4),
// writing 12 floats into caller-allocated c.
// Uses the usual row-major trick: since cuBLAS/hipBLAS is column-major,
// passing B first with swapped m/n yields A*B in row-major layout.
// Fixes vs. original: device buffers are now freed (they leaked) and the
// debug printf is removed.
void run22(const hipblasHandle_t &handle, const hipStream_t &stream, float *a, float *b, float *c) {
    float *d_a, *d_b, *d_c;
    gpu_memory_alloc<float>(6, d_a);
    gpu_memory_alloc<float>(8, d_b);
    gpu_memory_alloc<float>(12, d_c);
    hipMemcpy(d_a, a, sizeof(float) * 6, hipMemcpyDefault);
    hipMemcpy(d_b, b, sizeof(float) * 8, hipMemcpyDefault);
    float alph = 1.0f;
    float beta = 0.0f;
    /// a(3*2) b(2 *4 )
    hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, 3, 2, &alph, d_b, 4, d_a, 2, &beta, d_c, 4);
    hipMemcpyAsync(c, d_c, 12 * sizeof(float), hipMemcpyDefault, stream);
    hipStreamSynchronize(stream);
    // Release device buffers (the original leaked all three).
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
}
// Computes C = A * B with hipBLAS for row-major host matrices.
// A is HA x WA, B is HB x WB (row-major); C is HC x WC = HA x WB.
// Assumes WA == HB for a valid product — TODO confirm callers guarantee this.
// Copies inputs to the device, runs SGEMM, copies the result back into c,
// frees the temporaries, and returns the SGEMM status.
// NOTE(review): malloc failures only log and fall through; the debug printfs
// and duplicated device syncs look like leftovers.
hipblasStatus_t
addWithCuda6(const hipblasHandle_t &handle, const float *a, const float *b, const int WA, const int HA,
             const int WB,
             const int HB, float *c) {
    float *dev_a = 0;
    float *dev_b = 0;
    float *dev_c = 0;
    printf("aaaaaaaaaaa!\n");  // debug trace
    hipError_t cudaStatus;
    hipblasStatus_t cublasStatus;
    const int WC = WB;
    const int HC = HA;
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = hipMalloc((void **) &dev_c, WC * HC * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        // Error;
    }
    cudaStatus = hipMalloc((void **) &dev_a, HA * WA * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        //goto Error;
    }
    cudaStatus = hipMalloc((void **) &dev_b, HB * WB * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        //goto Error;
    }
    // Copy host matrices to the device (flattened row-major).
    hipblasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
    hipblasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
    // Wait for the copies before launching the GEMM.
    hipDeviceSynchronize();
    float alpha = 1.0;
    float beta = 0.0;
    //printf("aaaaaaaaaaa!\n");
    // Row-major trick: hipBLAS is column-major, so pass B first with
    // m = WB, n = HA, k = WA to get A*B laid out row-major in dev_c.
    int m = WB;
    int n = HA;
    int k = WA;
    int lda = WA;
    int ldb = WB;
    int ldc = WB;
    printf("%d,%d,%d,%d,%d,%d\n", m, n, k, lda, ldb, ldc);
    // Wall-clock of the (asynchronous) launch only, not the GEMM itself.
    clock_t start = clock();
    cublasStatus = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, dev_b, ldb, dev_a, lda, &beta, dev_c,
                                ldc);
    clock_t time_used = clock() - start;
    printf("(GPU31) time:%ld\n", time_used);
    hipDeviceSynchronize();
    //printf("aaaaaaaaaaa!\n");
    if (cublasStatus != HIPBLAS_STATUS_SUCCESS) {
        printf("CUBLASdddddd\n");
        if (cublasStatus == HIPBLAS_STATUS_NOT_INITIALIZED) {
            printf("CUBLAS \n");
        }
        //return;
    }
    // Redundant duplicate syncs (leftover from debugging).
    hipDeviceSynchronize();
    hipDeviceSynchronize();
    // Copy the result matrix back to the host.
    hipblasGetVector(HC * WC, sizeof(float), dev_c, 1, c, 1);
    //Error:
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
    return cublasStatus;
}
| 2dbb33d1d0b38afa41d1b02787141a81a7f0c644.cu | //
// Created by yanhao on 17-11-21.
//
#include <stdio.h>
#include <stdlib.h>
//#include "utils.h"
#include <iostream>
//#include "helper_cuda.h"
#include <stdio.h>
#include <cuda_runtime_api.h>
#include "mul_cublas.h"
// Helper function for using CUDA to add vectors in parallel.
//cublasStatus_t
//addWithCuda(const cublasHandle_t &handle, float *c, const float *a, const float *b, unsigned int WA, unsigned int HA,
// unsigned int WB,
// unsigned int HB) {
//
// float *dev_a = 0;
// float *dev_b = 0;
// float *dev_c = 0;
// cudaError_t cudaStatus;
// cublasStatus_t cublasStatus;
//
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void **) &dev_c, HA * WB * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// printf( "cudaMalloc failed!");
// // Error;
// }
//
// cudaStatus = cudaMalloc((void **) &dev_a, HA * WA * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// printf( "cudaMalloc failed!");
// //goto Error;
// }
//
// cudaStatus = cudaMalloc((void **) &dev_b, HB * WB * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// printf( "cudaMalloc failed!");
// //goto Error;
// }
//
// cublasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
// cublasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
// // 同步函数
// cudaThreadSynchronize();
//
// float alpha = 1.0;
// float beta = 0.0;
// cublasStatus = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a, HA, &beta, dev_c,
// HA);
//
// cudaThreadSynchronize();
// cublasGetVector(HA * WB, sizeof(float), c, 1, dev_c, 1);
// //Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
// return cublasStatus;
//}
//cublasStatus_t
//addWithCuda(const cublasHandle_t &handle, float *c, const float *a, const float *b, unsigned int WA, unsigned int HA,
// unsigned int WB,
// unsigned int HB) {
//
// float *dev_a = 0;
// float *dev_b = 0;
// float *dev_c = 0;
// cudaError_t cudaStatus;
// cublasStatus_t cublasStatus;
//
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void **) &dev_c, HA * WB * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// printf( "cudaMalloc failed!");
// // Error;
// }
//
// cudaStatus = cudaMalloc((void **) &dev_a, HA * WA * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// printf( "cudaMalloc failed!");
// //goto Error;
// }
//
// cudaStatus = cudaMalloc((void **) &dev_b, HB * WB * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// printf( "cudaMalloc failed!");
// //goto Error;
// }
//
// cublasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
// cublasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
// // 同步函数
// cudaThreadSynchronize();
//
// float alpha = 1.0;
// float beta = 0.0;
// clock_t start = clock();
//
// cublasStatus = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a, HA, &beta, dev_c,
// HA);
//
//
// clock_t time_used = clock() - start;
// printf("(GPU31) time:%ld\n", time_used);
// cudaThreadSynchronize();
// cublasGetVector(HA * WB, sizeof(float), c, 1, dev_c, 1);
// for (int i = 0; i < 2; ++i) {
// for (int j = 0; j < 2; ++j) {
// printf("%f\n", c[i * 2 + j]);
// }
// }
// //Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
// return cublasStatus;
//}
//addWithCuda2(const cublasHandle_t &handle, float *dev_c, const float *dev_a, const float *dev_b, unsigned int WA,
// unsigned int HA, unsigned int WB,
// unsigned int HB) {
//
// float alpha = 1.0;
// float beta = 0.0;
// cublasStatus_t cublasStatus = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a,
// HA, &beta, dev_c, HA);
//
//
//}
/**
const int WA = 3;
const int HA = 2;
const int WB = 3;
const int HB = 4;
float A[WA * HA] = {1, 2, 3,
4, 5, 6};
float B[WB * HB] = {1, 3, 9,
2, 1, 1,
3, 2, 5,
0, 2, 8
};
*/
// Appears to compute C = A * B^T for row-major host matrices (see the sample
// shapes in the comment block above: A is HA x WA, B is HB x WB with
// WA == WB; C is HC x WC = HA x HB).  B is transposed via CUBLAS_OP_T.
// NOTE(review): lda/ldb are assigned from WB/WA but passed as (dev_b, ldb),
// (dev_a, lda) — verify the leading dimensions against callers.
// Returns the SGEMM status; temporaries are freed before returning.
cublasStatus_t
addWithCuda5(const cublasHandle_t &handle, float *c, const float *a, const float *b, unsigned int WA, unsigned int HA,
             unsigned int WB,
             unsigned int HB) {
    float *dev_a = 0;
    float *dev_b = 0;
    float *dev_c = 0;
    printf("aaaaaaaaaaa!\n");  // debug trace
    cudaError_t cudaStatus;
    cublasStatus_t cublasStatus;
    const int WC = HB;
    const int HC = HA;
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = cudaMalloc((void **) &dev_c, WC * HC * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        // Error;
    }
    cudaStatus = cudaMalloc((void **) &dev_a, HA * WA * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        //goto Error;
    }
    cudaStatus = cudaMalloc((void **) &dev_b, HB * WB * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        //goto Error;
    }
    // Copy host matrices to the device (flattened row-major).
    cublasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
    cublasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
    // Synchronize so the copies complete before the GEMM.
    cudaThreadSynchronize();
    float alpha = 1.0;
    float beta = 0.0;
    //printf("aaaaaaaaaaa!\n");
    int m = HB;
    int n = HA;
    int k = WB;
    int lda = WB;
    int ldb = WA;
    int ldc = WC;
    printf("%d,%d,%d,%d,%d,%d\n", m, n, k, lda, ldb, ldc);
    // Wall-clock of the (asynchronous) launch only, not the GEMM itself.
    clock_t start = clock();
    cublasStatus = cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, dev_b, ldb, dev_a, lda, &beta, dev_c,
                               ldc);
    clock_t time_used = clock() - start;
    printf("(GPU31) time:%ld\n", time_used);
    cudaThreadSynchronize();
    //printf("aaaaaaaaaaa!\n");
    if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
        printf("CUBLASdddddd\n");
        if (cublasStatus == CUBLAS_STATUS_NOT_INITIALIZED) {
            printf("CUBLAS 对象实例化出错\n");
        }
        //return;
    }
    // Redundant duplicate syncs (leftover from debugging).
    cudaThreadSynchronize();
    cudaThreadSynchronize();
    // Copy the result matrix back to the host.
    cublasGetVector(HC * WC, sizeof(float), dev_c, 1, c, 1);
    //Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cublasStatus;
}
// Allocate `len` elements of T in device global memory; the device pointer is
// returned through `ptr`.  NOTE: the cudaMalloc status is not checked here.
template<typename T>
void gpu_memory_alloc(size_t len, T *&ptr) {
    const size_t bytes = len * sizeof(T);
    cudaMalloc(&ptr, bytes);
}
//#define CHECK_EQ(val1, val2) ((val1)==(val2))
#define CHECK_NE(val1, val2) CHECK_OP(_NE, !=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(_LE, <=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(_LT, < , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(_GE, >=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(_GT, > , val1, val2)
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
std::cout<< " log:" << cudaGetErrorString(error)<<std::endl; \
} while (0)
#define CUBLAS_CHECK(condition) \
do { \
cublasStatus_t status = condition; \
CHECK_EQ(status, CUBLAS_STATUS_SUCCESS) << " " \
<< caffe::cublasGetErrorString(status); \
} while (0)
#define CURAND_CHECK(condition) \
do { \
curandStatus_t status = condition; \
CHECK_EQ(status, CURAND_STATUS_SUCCESS) << " " \
<< caffe::curandGetErrorString(status); \
} while (0)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// CUDA: check for error after kernel execution and exit loudly if there is one.
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError())
// CUDA: threads per block for kernel launches (64 here, reduced from Caffe's usual 512)
const int CAFFE_CUDA_NUM_THREADS = 64;
// CUDA: number of blocks needed to cover N work items (ceiling division).
inline int CAFFE_GET_BLOCKS(const int N) {
  const int threads = CAFFE_CUDA_NUM_THREADS;
  return (N + threads - 1) / threads;
}
// Kernel: one thread per (input channel, output y, output x) position;
// n = channels * height_col * width_col.  Each thread writes the
// kernel_h*kernel_w patch values for its position into data_col, so one
// input channel expands into kernel_h*kernel_w output "column" channels.
// Taps that fall outside the (padded) image are written as 0.
template<typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype *data_im,
                                  const int height, const int width, const int kernel_h, const int kernel_w,
                                  const int pad_h, const int pad_w,
                                  const int stride_h, const int stride_w,
                                  const int dilation_h, const int dilation_w,
                                  const int height_col, const int width_col,
                                  Dtype *data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decode the flat index into (c_im, h_col, w_col) output coordinates.
    const int h_index = index / width_col;
    const int h_col = h_index % height_col;
    const int w_col = index % width_col;
    const int c_im = h_index / height_col;
    // First output channel produced from this input channel.
    const int c_col = c_im * kernel_h * kernel_w;
    // Top-left corner of the receptive field in the (padded) input image.
    const int h_offset = h_col * stride_h - pad_h;
    const int w_offset = w_col * stride_w - pad_w;
    Dtype *data_col_ptr = data_col;
    data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
    const Dtype *data_im_ptr = data_im;
    data_im_ptr += (c_im * height + h_offset) * width + w_offset;
    for (int i = 0; i < kernel_h; ++i) {
      for (int j = 0; j < kernel_w; ++j) {
        int h_im = h_offset + i * dilation_h;
        int w_im = w_offset + j * dilation_w;
        // Zero-pad taps outside the input image.
        *data_col_ptr =
            (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
            data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
        // Successive kernel taps land in successive output channels.
        data_col_ptr += height_col * width_col;
      }
    }
  }
}
// Expands a device image tensor (channels x height x width) into column form
// ("im2col") for GEMM-based convolution.  data_col must hold
// channels * kernel_h * kernel_w * height_col * width_col elements.
// Launches one thread per (channel, out_y, out_x) position and checks the
// launch via CUDA_POST_KERNEL_CHECK.  (Removed leftover debug printf.)
template<typename Dtype>
void im2col_gpu(const Dtype *data_im, const int channels,
                const int height, const int width, const int kernel_h, const int kernel_w,
                const int pad_h, const int pad_w,
                const int stride_h, const int stride_w,
                const int dilation_h, const int dilation_w,
                Dtype *data_col) {
  // Output spatial dims after padding, dilation and stride.
  int height_col = (height + 2 * pad_h -
      (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w -
      (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
  // One thread per element of channels * height_col * width_col.
  int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Host-side wrapper around im2col_gpu for float data resident on the host.
// Allocates device buffers, copies the image in, runs im2col, and copies the
// column buffer back.  data_col must be caller-allocated with
// channels * kszie * kszie * h_out * w_out floats.
// *h_out / *w_out receive the output spatial dims (square kernel, dilation 1).
// Fixes vs. original: bails out (and frees partial allocations) on
// cudaMalloc failure instead of continuing with null pointers; removed
// leftover debug printf.
void im2col2(const float *data_im, const int channels, int height, int width, const int kszie,
             const int pad, const int stride,
             float *data_col, int *h_out, int *w_out) {
    cudaError_t cudaStatus;
    float *dev_a = 0;
    float *dev_b = 0;
    // Output dims for a square kernel with dilation 1.
    int height_col = (height + 2 * pad -
                      ((kszie - 1) + 1)) / stride + 1;
    int width_col = (width + 2 * pad -
                     ((kszie - 1) + 1)) / stride + 1;
    *h_out = height_col;
    *w_out = width_col;
    cudaStatus = cudaMalloc((void **) &dev_a, channels * height * width * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        return;  // nothing allocated yet, safe to bail
    }
    cudaStatus = cudaMalloc((void **) &dev_b, channels * kszie * kszie * height_col * width_col * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        cudaFree(dev_a);  // do not leak the first allocation
        return;
    }
    cudaMemcpy(dev_a, data_im, sizeof(float) * channels * height * width, cudaMemcpyHostToDevice);
    im2col_gpu<float>(dev_a, channels, height, width, kszie, kszie, pad, pad, stride, stride, 1, 1, dev_b);
    cudaMemcpy(data_col, dev_b, channels * kszie * kszie * height_col * width_col * sizeof(float),
               cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
}
// Demo GEMM: c = a * b for row-major host matrices a (3x2) and b (2x4),
// writing 12 floats into caller-allocated c.
// Uses the usual row-major trick: since cuBLAS is column-major, passing B
// first with swapped m/n yields A*B in row-major layout.
// Fixes vs. original: device buffers are now freed (they leaked) and the
// debug printf is removed.
void run22(const cublasHandle_t &handle, const cudaStream_t &stream, float *a, float *b, float *c) {
    float *d_a, *d_b, *d_c;
    gpu_memory_alloc<float>(6, d_a);
    gpu_memory_alloc<float>(8, d_b);
    gpu_memory_alloc<float>(12, d_c);
    cudaMemcpy(d_a, a, sizeof(float) * 6, cudaMemcpyDefault);
    cudaMemcpy(d_b, b, sizeof(float) * 8, cudaMemcpyDefault);
    float alph = 1.0f;
    float beta = 0.0f;
    /// a(3*2) b(2 *4 )
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, 3, 2, &alph, d_b, 4, d_a, 2, &beta, d_c, 4);
    cudaMemcpyAsync(c, d_c, 12 * sizeof(float), cudaMemcpyDefault, stream);
    cudaStreamSynchronize(stream);
    // Release device buffers (the original leaked all three).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
// Computes C = A * B with cuBLAS for row-major host matrices.
// A is HA x WA, B is HB x WB (row-major); C is HC x WC = HA x WB.
// Assumes WA == HB for a valid product — TODO confirm callers guarantee this.
// Copies inputs to the device, runs SGEMM, copies the result back into c,
// frees the temporaries, and returns the SGEMM status.
// NOTE(review): malloc failures only log and fall through; the debug printfs
// and duplicated device syncs look like leftovers.
cublasStatus_t
addWithCuda6(const cublasHandle_t &handle, const float *a, const float *b, const int WA, const int HA,
             const int WB,
             const int HB, float *c) {
    float *dev_a = 0;
    float *dev_b = 0;
    float *dev_c = 0;
    printf("aaaaaaaaaaa!\n");  // debug trace
    cudaError_t cudaStatus;
    cublasStatus_t cublasStatus;
    const int WC = WB;
    const int HC = HA;
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = cudaMalloc((void **) &dev_c, WC * HC * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        // Error;
    }
    cudaStatus = cudaMalloc((void **) &dev_a, HA * WA * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        //goto Error;
    }
    cudaStatus = cudaMalloc((void **) &dev_b, HB * WB * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        //goto Error;
    }
    // Copy host matrices to the device (flattened row-major).
    cublasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
    cublasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
    // Synchronize so the copies complete before the GEMM.
    cudaThreadSynchronize();
    float alpha = 1.0;
    float beta = 0.0;
    //printf("aaaaaaaaaaa!\n");
    // Row-major trick: cuBLAS is column-major, so pass B first with
    // m = WB, n = HA, k = WA to get A*B laid out row-major in dev_c.
    int m = WB;
    int n = HA;
    int k = WA;
    int lda = WA;
    int ldb = WB;
    int ldc = WB;
    printf("%d,%d,%d,%d,%d,%d\n", m, n, k, lda, ldb, ldc);
    // Wall-clock of the (asynchronous) launch only, not the GEMM itself.
    clock_t start = clock();
    cublasStatus = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, dev_b, ldb, dev_a, lda, &beta, dev_c,
                               ldc);
    clock_t time_used = clock() - start;
    printf("(GPU31) time:%ld\n", time_used);
    cudaThreadSynchronize();
    //printf("aaaaaaaaaaa!\n");
    if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
        printf("CUBLASdddddd\n");
        if (cublasStatus == CUBLAS_STATUS_NOT_INITIALIZED) {
            printf("CUBLAS 对象实例化出错\n");
        }
        //return;
    }
    // Redundant duplicate syncs (leftover from debugging).
    cudaThreadSynchronize();
    cudaThreadSynchronize();
    // Copy the result matrix back to the host.
    cublasGetVector(HC * WC, sizeof(float), dev_c, 1, c, 1);
    //Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cublasStatus;
}
|
5a95448acd65f6177ac9e8ecf72209d9ff840373.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <vector>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
// Exercises RowPartitioner::UpdatePositionBatch: all 10 rows start in node 0,
// are split into nodes 1/2 by the predicate `ridx > 4`, then node 1 is split
// again into nodes 3/4 by `ridx < 7`.
void TestUpdatePositionBatch() {
  const int kNumRows = 10;
  RowPartitioner rp(0, kNumRows);
  auto rows = rp.GetRowsHost(0);
  EXPECT_EQ(rows.size(), kNumRows);
  // Initially every row index maps to itself in the root node.
  for (auto i = 0ull; i < kNumRows; i++) {
    EXPECT_EQ(rows[i], i);
  }
  std::vector<int> extra_data = {0};
  // Send the first five training instances to the right node
  // and the second 5 to the left node
  rp.UpdatePositionBatch({0}, {1}, {2}, extra_data, [=] __device__(RowPartitioner::RowIndexT ridx, int) {
    return ridx > 4;
  });
  // Node 1 receives rows 5..9, node 2 receives rows 0..4.
  rows = rp.GetRowsHost(1);
  for (auto r : rows) {
    EXPECT_GT(r, 4);
  }
  rows = rp.GetRowsHost(2);
  for (auto r : rows) {
    EXPECT_LT(r, 5);
  }
  // Split the left node again
  rp.UpdatePositionBatch({1}, {3}, {4}, extra_data,[=] __device__(RowPartitioner::RowIndexT ridx, int) {
    return ridx < 7;
  });
  // Node 1 held rows 5..9; `ridx < 7` sends {5,6} to node 3 and {7,8,9} to 4.
  EXPECT_EQ(rp.GetRows(3).size(), 2);
  EXPECT_EQ(rp.GetRows(4).size(), 3);
}
// Registers the scenario above with GoogleTest.
TEST(RowPartitioner, Batch) { TestUpdatePositionBatch(); }
// Partitions each segment of `ridx` with SortPositionBatch using the
// predicate "even row index goes left", then verifies that within every
// segment all matching rows precede all non-matching rows and that the
// reported per-segment counts agree.
void TestSortPositionBatch(const std::vector<int>& ridx_in, const std::vector<Segment>& segments) {
  thrust::device_vector<uint32_t> ridx = ridx_in;
  thrust::device_vector<uint32_t> ridx_tmp(ridx_in.size());
  thrust::device_vector<bst_uint> counts(segments.size());
  // Predicate under test; the int payload is unused.
  auto op = [=] __device__(auto ridx, int data) { return ridx % 2 == 0; };
  std::vector<int> op_data(segments.size());
  // Per-node metadata (segment bounds plus a dummy payload) staged on host
  // and copied to the device.
  std::vector<PerNodeData<int>> h_batch_info(segments.size());
  dh::TemporaryArray<PerNodeData<int>> d_batch_info(segments.size());
  std::size_t total_rows = 0;
  for (size_t i = 0; i < segments.size(); i++) {
    h_batch_info[i] = {segments.at(i), 0};
    total_rows += segments.at(i).Size();
  }
  dh::safe_cuda(hipMemcpyAsync(d_batch_info.data().get(), h_batch_info.data(),
                               h_batch_info.size() * sizeof(PerNodeData<int>), hipMemcpyDefault,
                               nullptr));
  dh::device_vector<int8_t> tmp;
  SortPositionBatch<uint32_t, decltype(op), int>(dh::ToSpan(d_batch_info), dh::ToSpan(ridx),
                                                 dh::ToSpan(ridx_tmp), dh::ToSpan(counts),
                                                 total_rows, op, &tmp);
  // Same predicate without the payload, for host-side verification.
  auto op_without_data = [=] __device__(auto ridx) { return ridx % 2 == 0; };
  for (size_t i = 0; i < segments.size(); i++) {
    auto begin = ridx.begin() + segments[i].begin;
    auto end = ridx.begin() + segments[i].end;
    bst_uint count = counts[i];
    // All of the first `count` entries must satisfy the predicate...
    auto left_partition_count =
        thrust::count_if(thrust::device, begin, begin + count, op_without_data);
    EXPECT_EQ(left_partition_count, count);
    // ...and none of the remaining entries may.
    auto right_partition_count =
        thrust::count_if(thrust::device, begin + count, end, op_without_data);
    EXPECT_EQ(right_partition_count, 0);
  }
}
// Covers even/uneven segment sizes, a single full segment, and segments
// given out of order with a gap.
TEST(GpuHist, SortPositionBatch) {
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 3}, {3, 6}});
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 1}, {3, 6}});
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 6}});
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{3, 6}, {0, 2}});
}
} // namespace tree
} // namespace xgboost
| 5a95448acd65f6177ac9e8ecf72209d9ff840373.cu | /*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <vector>
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
// Exercises RowPartitioner::UpdatePositionBatch: all 10 rows start in node 0,
// are split into nodes 1/2 by the predicate `ridx > 4`, then node 1 is split
// again into nodes 3/4 by `ridx < 7`.
void TestUpdatePositionBatch() {
  const int kNumRows = 10;
  RowPartitioner rp(0, kNumRows);
  auto rows = rp.GetRowsHost(0);
  EXPECT_EQ(rows.size(), kNumRows);
  // Initially every row index maps to itself in the root node.
  for (auto i = 0ull; i < kNumRows; i++) {
    EXPECT_EQ(rows[i], i);
  }
  std::vector<int> extra_data = {0};
  // Send the first five training instances to the right node
  // and the second 5 to the left node
  rp.UpdatePositionBatch({0}, {1}, {2}, extra_data, [=] __device__(RowPartitioner::RowIndexT ridx, int) {
    return ridx > 4;
  });
  // Node 1 receives rows 5..9, node 2 receives rows 0..4.
  rows = rp.GetRowsHost(1);
  for (auto r : rows) {
    EXPECT_GT(r, 4);
  }
  rows = rp.GetRowsHost(2);
  for (auto r : rows) {
    EXPECT_LT(r, 5);
  }
  // Split the left node again
  rp.UpdatePositionBatch({1}, {3}, {4}, extra_data,[=] __device__(RowPartitioner::RowIndexT ridx, int) {
    return ridx < 7;
  });
  // Node 1 held rows 5..9; `ridx < 7` sends {5,6} to node 3 and {7,8,9} to 4.
  EXPECT_EQ(rp.GetRows(3).size(), 2);
  EXPECT_EQ(rp.GetRows(4).size(), 3);
}
// Registers the scenario above with GoogleTest.
TEST(RowPartitioner, Batch) { TestUpdatePositionBatch(); }
// Partitions each segment of `ridx` with SortPositionBatch using the
// predicate "even row index goes left", then verifies that within every
// segment all matching rows precede all non-matching rows and that the
// reported per-segment counts agree.
void TestSortPositionBatch(const std::vector<int>& ridx_in, const std::vector<Segment>& segments) {
  thrust::device_vector<uint32_t> ridx = ridx_in;
  thrust::device_vector<uint32_t> ridx_tmp(ridx_in.size());
  thrust::device_vector<bst_uint> counts(segments.size());
  // Predicate under test; the int payload is unused.
  auto op = [=] __device__(auto ridx, int data) { return ridx % 2 == 0; };
  std::vector<int> op_data(segments.size());
  // Per-node metadata (segment bounds plus a dummy payload) staged on host
  // and copied to the device.
  std::vector<PerNodeData<int>> h_batch_info(segments.size());
  dh::TemporaryArray<PerNodeData<int>> d_batch_info(segments.size());
  std::size_t total_rows = 0;
  for (size_t i = 0; i < segments.size(); i++) {
    h_batch_info[i] = {segments.at(i), 0};
    total_rows += segments.at(i).Size();
  }
  dh::safe_cuda(cudaMemcpyAsync(d_batch_info.data().get(), h_batch_info.data(),
                                h_batch_info.size() * sizeof(PerNodeData<int>), cudaMemcpyDefault,
                                nullptr));
  dh::device_vector<int8_t> tmp;
  SortPositionBatch<uint32_t, decltype(op), int>(dh::ToSpan(d_batch_info), dh::ToSpan(ridx),
                                                 dh::ToSpan(ridx_tmp), dh::ToSpan(counts),
                                                 total_rows, op, &tmp);
  // Same predicate without the payload, for host-side verification.
  auto op_without_data = [=] __device__(auto ridx) { return ridx % 2 == 0; };
  for (size_t i = 0; i < segments.size(); i++) {
    auto begin = ridx.begin() + segments[i].begin;
    auto end = ridx.begin() + segments[i].end;
    bst_uint count = counts[i];
    // All of the first `count` entries must satisfy the predicate...
    auto left_partition_count =
        thrust::count_if(thrust::device, begin, begin + count, op_without_data);
    EXPECT_EQ(left_partition_count, count);
    // ...and none of the remaining entries may.
    auto right_partition_count =
        thrust::count_if(thrust::device, begin + count, end, op_without_data);
    EXPECT_EQ(right_partition_count, 0);
  }
}
// Covers even/uneven segment sizes, a single full segment, and segments
// given out of order with a gap.
TEST(GpuHist, SortPositionBatch) {
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 3}, {3, 6}});
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 1}, {3, 6}});
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{0, 6}});
  TestSortPositionBatch({0, 1, 2, 3, 4, 5}, {{3, 6}, {0, 2}});
}
} // namespace tree
} // namespace xgboost
|
671f9ca8cbf9c6273c16d1b6a005e8f437408314.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/feat_reshape_layer.hpp"
namespace caffe {
// Forward helper: zero out all n elements of `out` (grid-stride loop).
template <typename Dtype>
__global__ void ZeroFilling(const int n, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = Dtype(0);
  }
}
// forward: copy bottom data to buffer data.
// One thread per source element (n = src count).  The flat index is decoded
// into (b, c, y, x) with the source strides and re-encoded with the
// destination strides, so the source is copied into the top-left corner of
// the (possibly larger, zero-padded) destination buffer.
// Assumes dst dims are >= src dims — TODO confirm with callers.
template <typename Dtype>
__global__ void PixelCopying(const int n, const Dtype* in, Dtype* out,
                             int src_num, int src_channels, int src_height, int src_width,
                             int dst_num, int dst_channels, int dst_height, int dst_width) {
  CUDA_KERNEL_LOOP(index, n) {
    // mapping to the src data domain: decode (batch, channel, y, x)
    int spb = index / (src_width * src_height * src_channels);
    int spc = (index - spb * src_width * src_height * src_channels) / (src_width * src_height);
    int spy = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height) / src_width;
    int spx = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height - spy * src_width) % src_width;
    // mapping to the dst data domain: same coordinates, destination strides
    int dpx = spx;
    int dpy = spy;
    int dpc = spc;
    int dpb = spb;
    int dst_index = dpx + dpy * dst_width + dpc * dst_width * dst_height + dpb * dst_width * dst_height * dst_channels;
    out[dst_index] = in[index];
  }
}
// backward: copy buffer diff to bottom diff.
// Inverse of PixelCopying: one thread per destination (bottom diff) element
// (n = dst count).  The flat index is decoded with the destination strides
// and re-encoded with the source (padded buffer) strides, gathering each
// bottom-diff value from the corresponding buffer position.
template <typename Dtype>
__global__ void PixelDiffCopying(const int n, const Dtype* in, Dtype* out,
                                 int src_num, int src_channels, int src_height, int src_width,
                                 int dst_num, int dst_channels, int dst_height, int dst_width) {
  CUDA_KERNEL_LOOP(index, n) {
    // mapping to the dst diff domain: decode (batch, channel, y, x)
    int dpb = index / (dst_width * dst_height * dst_channels);
    int dpc = (index - dpb * dst_width * dst_height * dst_channels) / (dst_width * dst_height);
    int dpy = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height) / dst_width;
    int dpx = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height - dpy * dst_width) % dst_width;
    // mapping to the src diff domain: same coordinates, source strides
    int spx = dpx;
    int spy = dpy;
    int spc = dpc;
    int spb = dpb;
    int src_index = spx + spy * src_width + spc * src_width * src_height + spb * src_width * src_height * src_channels;
    // copy
    out[index] = in[src_index];
  }
}
// forward: reshaping the (padded) bottom data to top data.
// Space-to-depth: each output channel group of 4 (dpc / 4 selects the input
// channel, dpc % 4 selects a position inside a 2x2 spatial block) gathers one
// pixel of the corresponding 2x2 block at (dpx*2, dpy*2) in the source.
// One thread per destination (top) element; n = dst count.
// Expects src_height/src_width to be even (caller pads beforehand).
template <typename Dtype>
__global__ void PixelReshaping(const int n, const Dtype* in, Dtype* out,
                               int src_num, int src_channels, int src_height, int src_width,
                               int dst_num, int dst_channels, int dst_height, int dst_width) {
  CUDA_KERNEL_LOOP(index, n) {
    // pixel location decoding in the dst domain: (batch, channel, y, x)
    int dpb = index / (dst_width * dst_height * dst_channels);
    int dpc = (index - dpb * dst_width * dst_height * dst_channels) / (dst_width * dst_height);
    int dpy = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height) / dst_width;
    int dpx = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height - dpy * dst_width) % dst_width;
    // pixel location encoding in the src domain
    int spb = dpb;
    int spc = dpc / 4;           // source channel
    int block_shift = dpc % 4;   // which corner of the 2x2 block
    int spx = dpx * 2;
    int spy = dpy * 2;
    // block_shift: 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right
    if (block_shift == 0) {
      spx += 0;
      spy += 0;
    } else if (block_shift == 1) {
      spx += 1;
      spy += 0;
    } else if (block_shift == 2) {
      spx += 0;
      spy += 1;
    } else {
      spx += 1;
      spy += 1;
    }
    int src_index = spx + spy * src_width + spc * src_width * src_height + spb * src_width * src_height * src_channels;
    // copy
    out[index] = in[src_index];
  }
}
// backward: reshaping the top diff to buffer diff.
// Inverse of PixelReshaping (depth-to-space): each top-diff element
// (spc / 4 selects the buffer channel, spc % 4 the corner of the 2x2 block)
// is scattered to its position inside the 2x2 block at (spx*2, spy*2) of the
// padded buffer diff.  One thread per source (top diff) element; n = src count.
template <typename Dtype>
__global__ void PixelDiffReshaping(const int n, const Dtype* in, Dtype* out,
                                   int src_num, int src_channels, int src_height, int src_width,
                                   int dst_num, int dst_channels, int dst_height, int dst_width) {
  CUDA_KERNEL_LOOP(index, n) {
    // pixel location decoding in the src domain: (batch, channel, y, x)
    int spb = index / (src_width * src_height * src_channels);
    int spc = (index - spb * src_width * src_height * src_channels) / (src_width * src_height);
    int spy = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height) / src_width;
    int spx = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height - spy * src_width) % src_width;
    // pixel location encoding in the dst domain
    int dpb = spb;
    int dpc = spc / 4;           // destination (buffer) channel
    int block_shift = spc % 4;   // which corner of the 2x2 block
    int dpx = spx * 2;
    int dpy = spy * 2;
    // block_shift: 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right
    if (block_shift == 0) {
      dpx += 0;
      dpy += 0;
    } else if (block_shift == 1) {
      dpx += 1;
      dpy += 0;
    } else if (block_shift == 2) {
      dpx += 0;
      dpy += 1;
    } else {
      dpx += 1;
      dpy += 1;
    }
    int dst_index = dpx + dpy * dst_width + dpc * dst_width * dst_height + dpb * dst_width * dst_height * dst_channels;
    // copy
    out[dst_index] = in[index];
  }
}
// do forward: space-to-depth reshape of the bottom feature map.
// bottom (N, C, H, W) -> top (N, 4C, ceil(H/2), ceil(W/2)): odd H/W are first
// copied into a zero-padded even-sized buffer, then every 2x2 spatial block
// is packed into 4 consecutive output channels by PixelReshaping.
template <typename Dtype>
void FeatReshapeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                          const vector<Blob<Dtype>*>& top) {
  // Source (bottom) and destination (top) dimensions.
  int src_num = bottom[0]->num();
  int src_channels = bottom[0]->channels();
  int src_height = bottom[0]->height();
  int src_width = bottom[0]->width();
  int dst_height = top[0]->height();
  int dst_width = top[0]->width();
  int dst_channels = top[0]->channels();
  int dst_num = top[0]->num();
  // Intermediate buffer: source dims rounded up to even H/W.
  int buf_num = src_num;
  int buf_channels = src_channels;
  int buf_height = src_height;
  int buf_width = src_width;
  // if odd add one
  if (src_height % 2 == 1) {
    buf_height += 1;
  }
  if (src_width % 2 == 1) {
    buf_width += 1;
  }
  /*LOG(INFO) <<buf_num<<" "<<buf_channels<<" "<<buf_height<<" "<<buf_width;
  LOG(INFO) <<src_num<<" "<<src_channels<<" "<<src_height<<" "<<src_width;
  LOG(INFO) <<this->imgdata_buf_->num()<<" "<<this->imgdata_buf_->channels()<<" "<<this->imgdata_buf_->height()<<" "<<this->imgdata_buf_->width();*/
  const int buf_count = this->feat_buf_->count();
  const int src_count = bottom[0]->count();
  const int dst_count = top[0]->count();
  // Step 1: zero the buffer so padded rows/cols read as 0 after the copy.
  Dtype* feat_buf = this->feat_buf_->mutable_gpu_data();
  hipLaunchKernelGGL(( ZeroFilling<Dtype>), dim3(CAFFE_GET_BLOCKS(buf_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      buf_count, feat_buf);
  CUDA_POST_KERNEL_CHECK;
  // Step 2: copy bottom data into the top-left corner of the buffer.
  const Dtype* bottom_data = bottom[0]->gpu_data();
  feat_buf = this->feat_buf_->mutable_gpu_data();
  hipLaunchKernelGGL(( PixelCopying<Dtype>), dim3(CAFFE_GET_BLOCKS(src_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      src_count, bottom_data, feat_buf,
      src_num, src_channels, src_height, src_width,
      buf_num, buf_channels, buf_height, buf_width);
  CUDA_POST_KERNEL_CHECK;
  // Step 3: pack 2x2 spatial blocks of the buffer into top channels.
  const Dtype* feat_rbuf = this->feat_buf_->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  hipLaunchKernelGGL(( PixelReshaping<Dtype>), dim3(CAFFE_GET_BLOCKS(dst_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      dst_count, feat_rbuf, top_data,
      buf_num, buf_channels, buf_height, buf_width,
      dst_num, dst_channels, dst_height, dst_width);
  CUDA_POST_KERNEL_CHECK;
}
// do backward
template <typename Dtype>
void FeatReshapeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int dst_num = bottom[0]->num();
int dst_channels = bottom[0]->channels();
int dst_height = bottom[0]->height();
int dst_width = bottom[0]->width();
int src_height = top[0]->height();
int src_width = top[0]->width();
int src_channels = top[0]->channels();
int src_num = top[0]->num();
int buf_num = dst_num;
int buf_channels = dst_channels;
int buf_height = dst_height;
int buf_width = dst_width;
// if odd add one
if (dst_height % 2 == 1) {
buf_height += 1;
}
if (dst_width % 2 == 1) {
buf_width += 1;
}
const int buf_count = this->feat_buf_->count();
const int dst_count = bottom[0]->count();
const int src_count = top[0]->count();
// pixel diff reshape
Dtype* feat_diff_buf = this->feat_buf_->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
hipLaunchKernelGGL(( PixelDiffReshaping<Dtype>), dim3(CAFFE_GET_BLOCKS(src_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
src_count, top_diff, feat_diff_buf,
src_num, src_channels, src_height, src_width,
buf_num, buf_channels, buf_height, buf_width);
CUDA_POST_KERNEL_CHECK;
// pixel diff copy
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* feat_rbuf = this->feat_buf_->gpu_diff();
hipLaunchKernelGGL(( PixelDiffCopying<Dtype>), dim3(CAFFE_GET_BLOCKS(dst_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
dst_count, feat_rbuf, bottom_diff,
buf_num, buf_channels, buf_height, buf_width,
dst_num, dst_channels, dst_height, dst_width);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(FeatReshapeLayer);
} // namespace caffe
| 671f9ca8cbf9c6273c16d1b6a005e8f437408314.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/feat_reshape_layer.hpp"
namespace caffe {
// forward: fill zero
template <typename Dtype>
__global__ void ZeroFilling(const int n, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = 0.0;
}
}
// forward: copy bottom data to buffer data
template <typename Dtype>
__global__ void PixelCopying(const int n, const Dtype* in, Dtype* out,
int src_num, int src_channels, int src_height, int src_width,
int dst_num, int dst_channels, int dst_height, int dst_width) {
CUDA_KERNEL_LOOP(index, n) {
// mapping to the src data domain
int spb = index / (src_width * src_height * src_channels);
int spc = (index - spb * src_width * src_height * src_channels) / (src_width * src_height);
int spy = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height) / src_width;
int spx = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height - spy * src_width) % src_width;
// mapping to the dst data domain
int dpx = spx;
int dpy = spy;
int dpc = spc;
int dpb = spb;
int dst_index = dpx + dpy * dst_width + dpc * dst_width * dst_height + dpb * dst_width * dst_height * dst_channels;
out[dst_index] = in[index];
}
}
// backward: copy buffer diff to bottom diff
template <typename Dtype>
__global__ void PixelDiffCopying(const int n, const Dtype* in, Dtype* out,
int src_num, int src_channels, int src_height, int src_width,
int dst_num, int dst_channels, int dst_height, int dst_width) {
CUDA_KERNEL_LOOP(index, n) {
// mapping to the dst diff domain
int dpb = index / (dst_width * dst_height * dst_channels);
int dpc = (index - dpb * dst_width * dst_height * dst_channels) / (dst_width * dst_height);
int dpy = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height) / dst_width;
int dpx = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height - dpy * dst_width) % dst_width;
// mapping to the src diff domain
int spx = dpx;
int spy = dpy;
int spc = dpc;
int spb = dpb;
int src_index = spx + spy * src_width + spc * src_width * src_height + spb * src_width * src_height * src_channels;
// copy
out[index] = in[src_index];
}
}
// forward: reshaping the bottom data to top data
template <typename Dtype>
__global__ void PixelReshaping(const int n, const Dtype* in, Dtype* out,
int src_num, int src_channels, int src_height, int src_width,
int dst_num, int dst_channels, int dst_height, int dst_width) {
CUDA_KERNEL_LOOP(index, n) {
// pixel location decoding in the dst domain
int dpb = index / (dst_width * dst_height * dst_channels);
int dpc = (index - dpb * dst_width * dst_height * dst_channels) / (dst_width * dst_height);
int dpy = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height) / dst_width;
int dpx = (index - dpb * dst_width * dst_height * dst_channels - dpc * dst_width * dst_height - dpy * dst_width) % dst_width;
// pixel location encoding in the src domain
int spb = dpb;
int spc = dpc / 4;
int block_shift = dpc % 4;
int spx = dpx * 2;
int spy = dpy * 2;
if (block_shift == 0) {
spx += 0;
spy += 0;
}else if (block_shift == 1) {
spx += 1;
spy += 0;
}else if (block_shift == 2) {
spx += 0;
spy += 1;
}else {
spx += 1;
spy += 1;
}
int src_index = spx + spy * src_width + spc * src_width * src_height + spb * src_width * src_height * src_channels;
// copy
out[index] = in[src_index];
}
}
// backward: reshaping the top diff to buffer diff
template <typename Dtype>
__global__ void PixelDiffReshaping(const int n, const Dtype* in, Dtype* out,
int src_num, int src_channels, int src_height, int src_width,
int dst_num, int dst_channels, int dst_height, int dst_width) {
CUDA_KERNEL_LOOP(index, n) {
// pixel location decoding in the src domain
int spb = index / (src_width * src_height * src_channels);
int spc = (index - spb * src_width * src_height * src_channels) / (src_width * src_height);
int spy = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height) / src_width;
int spx = (index - spb * src_width * src_height * src_channels - spc * src_width * src_height - spy * src_width) % src_width;
// pixel location encoding in the dst domain
int dpb = spb;
int dpc = spc / 4;
int block_shift = spc % 4;
int dpx = spx * 2;
int dpy = spy * 2;
if (block_shift == 0) {
dpx += 0;
dpy += 0;
}else if (block_shift == 1) {
dpx += 1;
dpy += 0;
}else if (block_shift == 2) {
dpx += 0;
dpy += 1;
}else {
dpx += 1;
dpy += 1;
}
int dst_index = dpx + dpy * dst_width + dpc * dst_width * dst_height + dpb * dst_width * dst_height * dst_channels;
// copy
out[dst_index] = in[index];
}
}
// do forward
template <typename Dtype>
void FeatReshapeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int src_num = bottom[0]->num();
int src_channels = bottom[0]->channels();
int src_height = bottom[0]->height();
int src_width = bottom[0]->width();
int dst_height = top[0]->height();
int dst_width = top[0]->width();
int dst_channels = top[0]->channels();
int dst_num = top[0]->num();
int buf_num = src_num;
int buf_channels = src_channels;
int buf_height = src_height;
int buf_width = src_width;
// if odd add one
if (src_height % 2 == 1) {
buf_height += 1;
}
if (src_width % 2 == 1) {
buf_width += 1;
}
/*LOG(INFO) <<buf_num<<" "<<buf_channels<<" "<<buf_height<<" "<<buf_width;
LOG(INFO) <<src_num<<" "<<src_channels<<" "<<src_height<<" "<<src_width;
LOG(INFO) <<this->imgdata_buf_->num()<<" "<<this->imgdata_buf_->channels()<<" "<<this->imgdata_buf_->height()<<" "<<this->imgdata_buf_->width();*/
const int buf_count = this->feat_buf_->count();
const int src_count = bottom[0]->count();
const int dst_count = top[0]->count();
// filling zero to data buffer
Dtype* feat_buf = this->feat_buf_->mutable_gpu_data();
ZeroFilling<Dtype><<<CAFFE_GET_BLOCKS(buf_count), CAFFE_CUDA_NUM_THREADS>>>(
buf_count, feat_buf);
CUDA_POST_KERNEL_CHECK;
// copy src data to buffer data
const Dtype* bottom_data = bottom[0]->gpu_data();
feat_buf = this->feat_buf_->mutable_gpu_data();
PixelCopying<Dtype><<<CAFFE_GET_BLOCKS(src_count), CAFFE_CUDA_NUM_THREADS>>>(
src_count, bottom_data, feat_buf,
src_num, src_channels, src_height, src_width,
buf_num, buf_channels, buf_height, buf_width);
CUDA_POST_KERNEL_CHECK;
// pixel reshape
const Dtype* feat_rbuf = this->feat_buf_->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
PixelReshaping<Dtype><<<CAFFE_GET_BLOCKS(dst_count), CAFFE_CUDA_NUM_THREADS>>>(
dst_count, feat_rbuf, top_data,
buf_num, buf_channels, buf_height, buf_width,
dst_num, dst_channels, dst_height, dst_width);
CUDA_POST_KERNEL_CHECK;
}
// do backward
template <typename Dtype>
void FeatReshapeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int dst_num = bottom[0]->num();
int dst_channels = bottom[0]->channels();
int dst_height = bottom[0]->height();
int dst_width = bottom[0]->width();
int src_height = top[0]->height();
int src_width = top[0]->width();
int src_channels = top[0]->channels();
int src_num = top[0]->num();
int buf_num = dst_num;
int buf_channels = dst_channels;
int buf_height = dst_height;
int buf_width = dst_width;
// if odd add one
if (dst_height % 2 == 1) {
buf_height += 1;
}
if (dst_width % 2 == 1) {
buf_width += 1;
}
const int buf_count = this->feat_buf_->count();
const int dst_count = bottom[0]->count();
const int src_count = top[0]->count();
// pixel diff reshape
Dtype* feat_diff_buf = this->feat_buf_->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
PixelDiffReshaping<Dtype><<<CAFFE_GET_BLOCKS(src_count), CAFFE_CUDA_NUM_THREADS>>>(
src_count, top_diff, feat_diff_buf,
src_num, src_channels, src_height, src_width,
buf_num, buf_channels, buf_height, buf_width);
CUDA_POST_KERNEL_CHECK;
// pixel diff copy
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* feat_rbuf = this->feat_buf_->gpu_diff();
PixelDiffCopying<Dtype><<<CAFFE_GET_BLOCKS(dst_count), CAFFE_CUDA_NUM_THREADS>>>(
dst_count, feat_rbuf, bottom_diff,
buf_num, buf_channels, buf_height, buf_width,
dst_num, dst_channels, dst_height, dst_width);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(FeatReshapeLayer);
} // namespace caffe
|
3bd631e68c037e2c773f810338ddd70cee38b160.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
input_array[0] = my_index;
}
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = 1;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
hipMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 0;
}
// copy them to the GPU
hipMemcpy(device_array, host_array, num_bytes, hipMemcpyHostToDevice);
// define block and grid sizes
int block_size = 32;
int grid_size = (num_elements + block_size - 1) / block_size;
// run GPU code
hipLaunchKernelGGL(( device_global), dim3(grid_size), dim3(block_size), 0, 0, device_array, num_elements);
// copy output to host
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
// print any information
printf("host_array[0] = %u \n", host_array[0]);
// free memory
free(host_array);
hipFree(device_array);
} | 3bd631e68c037e2c773f810338ddd70cee38b160.cu | #include <stdio.h>
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
input_array[0] = my_index;
}
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = 1;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
cudaMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 0;
}
// copy them to the GPU
cudaMemcpy(device_array, host_array, num_bytes, cudaMemcpyHostToDevice);
// define block and grid sizes
int block_size = 32;
int grid_size = (num_elements + block_size - 1) / block_size;
// run GPU code
device_global<<<grid_size, block_size>>>(device_array, num_elements);
// copy output to host
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
// print any information
printf("host_array[0] = %u \n", host_array[0]);
// free memory
free(host_array);
cudaFree(device_array);
} |
0120273f40fa312eb16f4a719a3db8bf31cdc1ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTable(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
template <typename T>
class LookupTableCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.Inputs("Ids").front();
auto out_name = context.Outputs("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
auto *ids = ids_t->data<int64_t>();
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
dim3 threads(128, 8);
dim3 grids(8, 1);
if (padding_idx == -1)
hipLaunchKernelGGL(( LookupTable<
T, 128, 8, 8,
false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
else
hipLaunchKernelGGL(( LookupTable<
T, 128, 8, 8,
true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(),
output, table, ids, N, K, D, padding_idx);
}
};
template <typename T>
class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace());
// TODO(yuyang18): Strange code here.
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d);
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t *ids = ids_t->data<int64_t>();
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
dim3 threads(128, 8);
dim3 grids(8, 1);
hipLaunchKernelGGL(( LookupTableGrad<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids, N, K, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>,
ops::LookupTableCUDAKernel<double>,
ops::LookupTableCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(lookup_table_grad,
ops::LookupTableGradCUDAKernel<float>,
ops::LookupTableGradCUDAKernel<double>,
ops::LookupTableGradCUDAKernel<plat::float16>);
| 0120273f40fa312eb16f4a719a3db8bf31cdc1ba.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX,
bool PaddingFlag>
__global__ void LookupTable(T *output, const T *table, const int64_t *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += BlockDimY * GridDimX;
}
}
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * GridDimX;
while (idy < K) {
int64_t id = ids[idy];
PADDLE_ENFORCE(
id >= 0,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
PADDLE_ENFORCE(
id < N,
"Variable value (input) of OP(fluid.layers.embedding) "
"expected >= 0 and < %ld, but got %ld. Please check input value.",
N, id);
const T *out = output + idy * D;
T *tab = table + id * D;
for (int i = idx; i < D; i += BlockDimX) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
idy += BlockDimY * GridDimX;
}
}
template <typename T>
class LookupTableCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
int64_t padding_idx = context.Attr<int64_t>("padding_idx");
auto id_name = context.Inputs("Ids").front();
auto out_name = context.Outputs("Out").front();
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
auto *ids = ids_t->data<int64_t>();
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
dim3 threads(128, 8);
dim3 grids(8, 1);
if (padding_idx == -1)
LookupTable<
T, 128, 8, 8,
false><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
else
LookupTable<
T, 128, 8, 8,
true><<<grids, threads, 0, context.cuda_device_context().stream()>>>(
output, table, ids, N, K, D, padding_idx);
}
};
template <typename T>
class LookupTableGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *ids = context.Input<LoDTensor>("Ids");
auto *table = context.Input<LoDTensor>("W");
auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W"));
auto *ids_data = ids->data<int64_t>();
int64_t ids_num = ids->numel();
auto stream = dev_ctx.stream();
// copy GPU memory to CPU pinned memory
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace());
// TODO(yuyang18): Strange code here.
memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->mutable_data<T>(context.GetPlace());
auto *d_table_data = d_table_value->data<T>();
auto *d_output_data = d_output->data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d);
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t *ids = ids_t->data<int64_t>();
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
dim3 threads(128, 8);
dim3 grids(8, 1);
LookupTableGrad<T, 128, 8, 8><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids, N, K, D);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>,
ops::LookupTableCUDAKernel<double>,
ops::LookupTableCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(lookup_table_grad,
ops::LookupTableGradCUDAKernel<float>,
ops::LookupTableGradCUDAKernel<double>,
ops::LookupTableGradCUDAKernel<plat::float16>);
|
46b2da089a2b2e45aec8b03095d391b0d283c794.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData_v3(handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 46b2da089a2b2e45aec8b03095d391b0d283c794.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
// Runs the cuDNN forward convolution for every bottom/top blob pair.
// Each filter group is issued on its own cuDNN handle (handle_[g]), so the
// groups can run concurrently; an empty null-stream kernel joins them at the
// end of each blob's work.
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[i]->mutable_gpu_data();
    const Dtype* weight = this->blobs_[0]->gpu_data();
    // Forward through cuDNN in parallel over groups.
    for (int g = 0; g < this->group_; g++) {
      // Filters: top = conv(bottom, weight), using the algorithm and
      // workspace sizes chosen at setup time for this blob index.
      // beta = zero, so the previous contents of top are overwritten.
      CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
            cudnn::dataType<Dtype>::one,
            bottom_descs_[i], bottom_data + bottom_offset_ * g,
            filter_desc_, weight + weight_offset_ * g,
            conv_descs_[i],
            fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
            cudnn::dataType<Dtype>::zero,
            top_descs_[i], top_data + top_offset_ * g));
      // Bias: top += bias. cudnnAddTensor dropped its mode argument in
      // cuDNN v4, hence the version split below.
      if (this->bias_term_) {
        const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
        CUDNN_CHECK(cudnnAddTensor(handle_[g],
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_data + bias_offset_ * g,
              cudnn::dataType<Dtype>::one,
              top_descs_[i], top_data + top_offset_ * g));
#else
        CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_data + bias_offset_ * g,
              cudnn::dataType<Dtype>::one,
              top_descs_[i], top_data + top_offset_ * g));
#endif
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    sync_conv_groups<<<1, 1>>>();
  }
}
template <typename Dtype>
// Computes the backward pass via cuDNN: gradients w.r.t. bias, weights and
// bottom data. The per-group work for the three gradient kinds is spread
// over distinct handles (0*group_+g, 1*group_+g, 2*group_+g) so it can run
// concurrently; a null-stream empty kernel joins the streams per blob.
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = NULL;
  Dtype* weight_diff = NULL;
  if (this->param_propagate_down_[0]) {
    weight = this->blobs_[0]->gpu_data();
    weight_diff = this->blobs_[0]->mutable_gpu_diff();
  }
  Dtype* bias_diff = NULL;
  if (this->bias_term_ && this->param_propagate_down_[1]) {
    bias_diff = this->blobs_[1]->mutable_gpu_diff();
  }
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    // Backward through cuDNN in parallel over groups and gradients.
    for (int g = 0; g < this->group_; g++) {
      // Gradient w.r.t. bias (beta = one: accumulates into bias_diff).
      if (this->bias_term_ && this->param_propagate_down_[1]) {
        CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              top_descs_[i], top_diff + top_offset_ * g,
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_diff + bias_offset_ * g));
      }
      // Gradient w.r.t. weights (beta = one: accumulates into weight_diff).
      if (this->param_propagate_down_[0]) {
        const Dtype* bottom_data = bottom[i]->gpu_data();
        CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(handle_[1*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              bottom_descs_[i], bottom_data + bottom_offset_ * g,
              top_descs_[i], top_diff + top_offset_ * g,
              conv_descs_[i],
              bwd_filter_algo_[i], workspace[1*this->group_ + g],
              workspace_bwd_filter_sizes_[i],
              cudnn::dataType<Dtype>::one,
              filter_desc_, weight_diff + weight_offset_ * g));
      }
      // Gradient w.r.t. bottom data (beta = zero: overwrites bottom_diff).
      if (propagate_down[i]) {
        // Weights may not have been fetched above if param_propagate_down_[0]
        // is false; fetch lazily here.
        if (weight == NULL) {
          weight = this->blobs_[0]->gpu_data();
        }
        Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
        CUDNN_CHECK(cudnnConvolutionBackwardData_v3(handle_[2*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              filter_desc_, weight + weight_offset_ * g,
              top_descs_[i], top_diff + top_offset_ * g,
              conv_descs_[i],
              bwd_data_algo_[i], workspace[2*this->group_ + g],
              workspace_bwd_data_sizes_[i],
              cudnn::dataType<Dtype>::zero,
              bottom_descs_[i], bottom_diff + bottom_offset_ * g));
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    sync_conv_groups<<<1, 1>>>();
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
0c238e1b45ffd69202cf30c9f3477dd411ecb2a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//********************************************************************************************************
#include <cstdio>// a simple matrix matrix multiplication in CUDA
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
using namespace std;
//#ifdef DOLOG
//#define LOG(msg) std::cerr<<msg<<std::endl
#define LOG(msg) fprintf(stderr, msg "\n");
//#else
//#define LOG(msg)
//#endif
// host code for validating last cuda operation (not kernel launch)
//using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed HIP API call with file/line context on stderr and, unless
// 'abort' is false, terminates the process using the error code as exit status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code == hipSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// CPU matrix multiplication: code(the normal way to mulitiply matrices)
// Naive 3x3 matrix product: C_O = A * B, with firstCol as the inner dimension.
// NOTE(review): the parameters are typed int** (arrays of row pointers), but
// main() fills these device buffers with flat int[3][3] element data, so
// A[i][k] / B[k][j] / C_O[i][j] dereference matrix *elements* as pointers --
// undefined behaviour; the kernel should take flat int* buffers. TODO confirm.
// NOTE(review): every thread passing the bounds check recomputes the whole
// 3x3 product (x_idx/y_idx are not used inside the loops), so the work is
// redundant across threads. Parameters C, A_O and B_O are never used.
__global__
void multiply (int width,int height ,int firstCol ,int** A,int** B,int** C,int** A_O,int** B_O, int** C_O)
{
    int x_idx = threadIdx.x + blockIdx.x * blockDim.x;
    int y_idx = threadIdx.y + blockIdx.y * blockDim.y;
    if (x_idx < width && y_idx <height ) {
        //int temp = 0;
        for (int i = 0; i <3; i++)
        {
            //printf ("i= %d", i);
            //printf ("\n");
            for (int j = 0; j <3; j++)
            {
                //printf ("j= %d", j);
                //printf ("\n");
                int temp = 0;
                //printf ("temp= %d", temp);
                //printf ("\n");
                for (int k = 0; k < firstCol; k++)
                {
                    //printf ("k= %d", k);
                    // printf ("\n");
                    temp += A[i][k] * B[k][j]; //C is fixed while A is rowWise and B is columnWise
                    C_O[i][j] =temp ;
                    //printf ("Cij= %d", temp);
                    //printf ("\n");
                }
            }
        }
    }
}
//**************************************************************************************
// Debug kernel: the per-element print is commented out, so each thread simply
// emits one blank line per row of the 3x3 result (three newlines in total).
// A, B and C are accepted but never read.
__global__ void display (int width, int height,int** A,int** B,int** C ){
    for (int row = 0; row < 3; row++) {
        // element printing intentionally disabled:
        // printf ("%d\t", C[row][col]);
        printf("\n");
    }
}
//***********************************************************************************
// Host driver: builds fixed 3x3 integer matrices, copies them to the device,
// launches the multiply/display kernels and copies the result back.
// NOTE(review): d_A/d_B/d_C/A_O/B_O/C_O are typed int** yet allocated and
// filled as flat int buffers ((width)*(height)*sizeof(int)); the kernels then
// double-dereference them (A[i][k]) -- undefined behaviour. Fixing this needs
// flat int* kernel signatures. TODO confirm intended layout with the author.
int main ()
{
    int width, height;
    //vector<vector<int> > A { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    //vector<vector<int> > B { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    //vector<vector<int> > C { {0, 0, 0}, {0, 0, 0}, {0, 0, 0} };
    int A[][3]= { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    int B[][3]= { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    int C[][3]= { {0, 0, 0}, {0, 0, 0}, {0, 0, 0} };
    //**********************************************************************************
    //Memory allocaction
    width=3;
    height=3;
    int firstCol= 3;
    int **d_A, **d_B, **d_C; //allocate memory on device
    //copy matrix to GPU
    gpuErrchk(hipMalloc((void**)&d_A, (width)*(height)*sizeof(int)));
    gpuErrchk(hipMalloc((void**)&d_B, (width)*(height)*sizeof(int)));
    gpuErrchk(hipMalloc((void**)&d_C, (width)*(height)*sizeof(int)));
    //same
    int **A_O,**B_O,**C_O;
    gpuErrchk(hipMalloc((void**)&A_O,(width)*(height)*sizeof(int)));
    gpuErrchk(hipMalloc((void**)&B_O,(width)*(height)*sizeof(int)));
    gpuErrchk(hipMalloc((void**)&C_O,(width)*(height)*sizeof(int)));
    gpuErrchk(hipMemcpy(d_A, &A[0][0],(width)*(height)*sizeof(int) , hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_B, &B[0][0],(width)*(height)*sizeof(int) , hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_C, &C[0][0],(width)*(height)*sizeof(int) , hipMemcpyHostToDevice));
    //printf ("matrix A= %d", A);
    //printf ("matrix B= %d", B);
    //call kernel -- 16x16 blocks, grid sized by ceil-division over the matrix.
    dim3 tpb(16,16);
    dim3 bpg((width+tpb.x-1)/tpb.x, (height+tpb.y-1)/tpb.y);
    hipLaunchKernelGGL((multiply), dim3(bpg),dim3(tpb), 0, 0, width, height , firstCol ,d_A,d_B,d_C, A_O, B_O, C_O);
    hipLaunchKernelGGL((display), dim3(bpg),dim3(tpb), 0, 0, width,height,d_A,d_B,d_C);
    //copy matrix back to CPU (result is read from C_O, the kernel's output buffer)
    //gpuErrchk(hipMemcpy(&A[0][0], d_A, (width)*(height)*sizeof(int), hipMemcpyDeviceToHost));
    //gpuErrchk(hipMemcpy(&B[0][0], d_B, (width)*(height)*sizeof(int), hipMemcpyDeviceToHost));
    gpuErrchk(hipMemcpy(&C[0][0], (void**)C_O, (width)*(height)*sizeof(int), hipMemcpyDeviceToHost));
    hipFree(d_A);hipFree(d_B);hipFree(d_C);hipFree(A_O);hipFree(B_O);hipFree(C_O);
    return 0;
} | 0c238e1b45ffd69202cf30c9f3477dd411ecb2a8.cu | //********************************************************************************************************
#include <cstdio>// a simple matrix matrix multiplication in CUDA
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
using namespace std;
//#ifdef DOLOG
//#define LOG(msg) std::cerr<<msg<<std::endl
#define LOG(msg) fprintf(stderr, msg "\n");
//#else
//#define LOG(msg)
//#endif
// host code for validating last cuda operation (not kernel launch)
//using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA API call with file/line context on stderr and, unless
// 'abort' is false, terminates the process using the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// CPU matrix multiplication: code(the normal way to mulitiply matrices)
// Naive 3x3 matrix product: C_O = A * B, with firstCol as the inner dimension.
// NOTE(review): the parameters are typed int** (arrays of row pointers), but
// main() fills these device buffers with flat int[3][3] element data, so
// A[i][k] / B[k][j] / C_O[i][j] dereference matrix *elements* as pointers --
// undefined behaviour; the kernel should take flat int* buffers. TODO confirm.
// NOTE(review): every thread passing the bounds check recomputes the whole
// 3x3 product (x_idx/y_idx are not used inside the loops), so the work is
// redundant across threads. Parameters C, A_O and B_O are never used.
__global__
void multiply (int width,int height ,int firstCol ,int** A,int** B,int** C,int** A_O,int** B_O, int** C_O)
{
    int x_idx = threadIdx.x + blockIdx.x * blockDim.x;
    int y_idx = threadIdx.y + blockIdx.y * blockDim.y;
    if (x_idx < width && y_idx <height ) {
        //int temp = 0;
        for (int i = 0; i <3; i++)
        {
            //printf ("i= %d", i);
            //printf ("\n");
            for (int j = 0; j <3; j++)
            {
                //printf ("j= %d", j);
                //printf ("\n");
                int temp = 0;
                //printf ("temp= %d", temp);
                //printf ("\n");
                for (int k = 0; k < firstCol; k++)
                {
                    //printf ("k= %d", k);
                    // printf ("\n");
                    temp += A[i][k] * B[k][j]; //C is fixed while A is rowWise and B is columnWise
                    C_O[i][j] =temp ;
                    //printf ("Cij= %d", temp);
                    //printf ("\n");
                }
            }
        }
    }
}
//**************************************************************************************
// Debug kernel: the per-element print is commented out, so each thread simply
// emits one blank line per row of the 3x3 result (three newlines in total).
// A, B and C are accepted but never read.
__global__ void display (int width, int height,int** A,int** B,int** C ){
    for (int row = 0; row < 3; row++) {
        // element printing intentionally disabled:
        // printf ("%d\t", C[row][col]);
        printf("\n");
    }
}
//***********************************************************************************
// Host driver: builds fixed 3x3 integer matrices, copies them to the device,
// launches the multiply/display kernels and copies the result back.
// NOTE(review): d_A/d_B/d_C/A_O/B_O/C_O are typed int** yet allocated and
// filled as flat int buffers ((width)*(height)*sizeof(int)); the kernels then
// double-dereference them (A[i][k]) -- undefined behaviour. Fixing this needs
// flat int* kernel signatures. TODO confirm intended layout with the author.
int main ()
{
    int width, height;
    //vector<vector<int> > A { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    //vector<vector<int> > B { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    //vector<vector<int> > C { {0, 0, 0}, {0, 0, 0}, {0, 0, 0} };
    int A[][3]= { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    int B[][3]= { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    int C[][3]= { {0, 0, 0}, {0, 0, 0}, {0, 0, 0} };
    //**********************************************************************************
    //Memory allocaction
    width=3;
    height=3;
    int firstCol= 3;
    int **d_A, **d_B, **d_C; //allocate memory on device
    //copy matrix to GPU
    gpuErrchk(cudaMalloc((void**)&d_A, (width)*(height)*sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&d_B, (width)*(height)*sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&d_C, (width)*(height)*sizeof(int)));
    //same
    int **A_O,**B_O,**C_O;
    gpuErrchk(cudaMalloc((void**)&A_O,(width)*(height)*sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&B_O,(width)*(height)*sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&C_O,(width)*(height)*sizeof(int)));
    gpuErrchk(cudaMemcpy(d_A, &A[0][0],(width)*(height)*sizeof(int) , cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_B, &B[0][0],(width)*(height)*sizeof(int) , cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_C, &C[0][0],(width)*(height)*sizeof(int) , cudaMemcpyHostToDevice));
    //printf ("matrix A= %d", A);
    //printf ("matrix B= %d", B);
    //call kernel -- 16x16 blocks, grid sized by ceil-division over the matrix.
    dim3 tpb(16,16);
    dim3 bpg((width+tpb.x-1)/tpb.x, (height+tpb.y-1)/tpb.y);
    multiply<<<bpg,tpb>>>(width, height , firstCol ,d_A,d_B,d_C, A_O, B_O, C_O);
    display<<<bpg,tpb>>>(width,height,d_A,d_B,d_C);
    //copy matrix back to CPU (result is read from C_O, the kernel's output buffer)
    //gpuErrchk(cudaMemcpy(&A[0][0], d_A, (width)*(height)*sizeof(int), cudaMemcpyDeviceToHost));
    //gpuErrchk(cudaMemcpy(&B[0][0], d_B, (width)*(height)*sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(&C[0][0], (void**)C_O, (width)*(height)*sizeof(int), cudaMemcpyDeviceToHost));
    cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);cudaFree(A_O);cudaFree(B_O);cudaFree(C_O);
    return 0;
}
9b9a726b7931d39ee62537d64f023d92ccaef2b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define DATA float
#define BOOL int
#define MAX_ERR 1e-5
#define MAX(a,b) ((a)>(b)?(a):(b))
//Grid features
//See the Feb 15 diary entry (feedforward step 1, design considerations)
#define OPTIMUM_BLOCK_NUM 4
#define BLOCK_SIDE 16
#define OPTIMUM_BLOCK_NUM_FIRST_LAYER 2
#define BLOCK_SIDE_FIRST_LAYER 32
/*Struct Grid Settings*/
// Per-layer launch configuration: grid[l] holds the preferred block count and
// block[l] the (square) block side used when processing layer l (see the gs
// initializer below and its use in FeedAndBack).
typedef struct grid_settings {
    unsigned int grid[3];   // preferred number of blocks per layer
    unsigned int block[3];  // square block side per layer
}grid_settings;
grid_settings gs = { { OPTIMUM_BLOCK_NUM_FIRST_LAYER, OPTIMUM_BLOCK_NUM, OPTIMUM_BLOCK_NUM },{ BLOCK_SIDE_FIRST_LAYER,BLOCK_SIDE,BLOCK_SIDE } };
//Network features
#define NEURO_INPUT 784 //#neurons of input layer
#define NEURO_H_0 56 //#neurons of first hidden layer
#define NEURO_H_1 28 //#neurons of second hidden layer
#define NEURO_OUTPUT 10 //#neurons of output layer
#define TOTAL_PATT 60000 //#total patterns
#define NUM_HIDDEN 2 //#hidden layers
#define TOTAL_LAYER 4 //#of layers
//Streams Settings
#define NSTREAMS 3
#define STREAMSIZE TOTAL_PATT/NSTREAMS
/*Struct One Copy HostToDev -- Contains weights and bias*/
//struct features
#define MATRIX_NUMBER_STRUCT 4 //#matrix to copy to Device(in struct)
#define GLOBAL_H_SIZE TOTAL_PATT * (NEURO_INPUT + NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT)
#define GLOBAL_DELTA_SIZE TOTAL_PATT * (NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT)
#define GLOBAL_W_SIZE (NEURO_INPUT*NEURO_H_0) + (NEURO_H_0*NEURO_H_1) + (NEURO_H_1*NEURO_OUTPUT)
#define GLOBAL_BIAS_SIZE NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT
// Host-side mirror of all network state copied to the device in one shot:
// weights, biases, deltas and layer activations (H2H), plus the padding
// index tables that locate each layer's slice inside the flat arrays.
typedef struct host_to_dev_mem {
    DATA WeightH2H[GLOBAL_W_SIZE];   // all layer-to-layer weight matrices, flattened
    DATA BiasH2H[GLOBAL_BIAS_SIZE];  // all per-layer bias vectors, flattened
    DATA Delta[GLOBAL_DELTA_SIZE];   // per-pattern deltas for every non-input layer
    DATA H2H[GLOBAL_H_SIZE];         // per-pattern activations for every layer
    int matrix_WB_index[MATRIX_NUMBER_STRUCT - 2][TOTAL_LAYER - 1];//INDEX for padding in Weight & Bias
    int matrix_DELTA_index[MATRIX_NUMBER_STRUCT - 3][TOTAL_LAYER - 1];//INDEX for padding in Delta
    int matrix_H2H_index[MATRIX_NUMBER_STRUCT - 3][TOTAL_LAYER];//INDEX for padding in H2H
} host_to_dev_mem;
// Device-resident counterpart of host_to_dev_mem: the same four flat data
// arrays without the index tables (indices are read from the host copy).
typedef struct dev_struct {
    DATA WeightH2H[GLOBAL_W_SIZE];
    DATA BiasH2H[GLOBAL_BIAS_SIZE];
    DATA Delta[GLOBAL_DELTA_SIZE];
    DATA H2H[GLOBAL_H_SIZE];
} dev_struct;
//Texture reference (FOR TARGET MATRIX)
texture<DATA, 2, hipReadModeElementType> texreference_target;
/*UTILITIES*/
// Prints a diagnostic (error string, file, line) and aborts the program
// whenever a HIP runtime call did not return hipSuccess.
static void HandleCuda(hipError_t err, const char *file, int line) {
    if (err == hipSuccess)
        return;
    printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_CUDA( err ) (HandleCuda( err, __FILE__, __LINE__ ))
// Creates the two timing events and records 'start' on the default stream.
// Pair with stopAndPrint() to measure elapsed GPU time between the calls.
void startTimer(hipEvent_t *start, hipEvent_t *stop) {
    HANDLE_CUDA(hipEventCreate(start));
    HANDLE_CUDA(hipEventCreate(stop));
    HANDLE_CUDA(hipEventRecord(*start, 0));
}
// Records 'stop' on the default stream, waits for it, prints the elapsed
// time in milliseconds since startTimer(), then destroys both events.
void stopAndPrint(hipEvent_t *start, hipEvent_t *stop) {
    HANDLE_CUDA(hipEventRecord(*stop, 0));
    HANDLE_CUDA(hipEventSynchronize(*stop));
    float time = 0.0f;
    HANDLE_CUDA(hipEventElapsedTime(&time, *start, *stop));
    printf("Elapsed Time: %f milliseconds\n", time);
    HANDLE_CUDA(hipEventDestroy(*start));
    HANDLE_CUDA(hipEventDestroy(*stop));
}
/*DEVICE*/
/*deviceReduceBlockAtomicKernel*/
__inline__ __device__ DATA warpReduceSum(DATA);
__inline__ __device__ DATA blockReduceSum(DATA);
__global__ void deviceReduceBlockAtomicKernel(DATA *, DATA*, int);
/*MMMul(for feedforward)*/
__device__ void MMMulDevPartialFeed(DATA *, DATA *, DATA *, DATA *, DATA*, DATA *, unsigned int, unsigned int, unsigned int, unsigned int);
__global__ void MMMulDevFeed(DATA *, DATA *, DATA *, DATA *, DATA *, DATA*, unsigned int, unsigned int, unsigned int, unsigned int);
/*MMMul(for backpropagation)*/
__device__ void MMMulDevPartialBack(DATA *, DATA *, DATA *, DATA *, unsigned int, unsigned int, unsigned int);
__global__ void MMMulDevBack(DATA *, DATA *, DATA *, DATA *, unsigned int, unsigned int, unsigned int);
/*HOST*/
void FeedAndBack(DATA *, struct host_to_dev_mem *, struct dev_struct *, DATA *, DATA *, int *, int, hipStream_t *, BOOL);
void HOST_feedforward(DATA *, DATA **, DATA **, DATA **, int *);
void printMat(DATA *, int, int);
void printErrorMat(DATA *, DATA*, int, int);
void MMMulHost(DATA *, DATA *, DATA *, DATA *, int, int, int);
BOOL matsAreEquals(DATA *, DATA *, int, int);
DATA errorReductionHost(DATA *, int, int);
/*HOST ALLOCATION AND INITIALIZATION*/
void HOST_init_struct(struct host_to_dev_mem*, int*, int);
/*----------------------------------------------------------------------MAIN---------------------------------------------------------------------------*/
// Driver: allocates host/device state for a 784-56-28-10 network (see the
// NEURO_* defines), fills input and target matrices with uniform random
// values, binds the target matrix to a 2D texture, creates NSTREAMS HIP
// streams, runs one timed feedforward+backprop pass, and prints the reduced
// output-layer error before releasing all resources.
int main(void) {
    DATA *INPUT_MAT, *ERROR_MAT, *DEV_ERROR_MAT;
    DATA *ERROR, *DEV_ERROR;
    DATA *TARGET;
    hipStream_t streams[NSTREAMS];
    // nupl[l] = number of neurons in layer l
    int *nupl = (int*)malloc(TOTAL_LAYER * sizeof(int));
    /*++++------------------------------------ERRORS--------------------------------------------------++++*/
    ERROR_MAT = (DATA*)malloc(TOTAL_PATT*NEURO_OUTPUT * sizeof(DATA)); // ERROR FOR CHECKING CORRECTNESS
    HANDLE_CUDA(hipMalloc((void **)&DEV_ERROR_MAT, TOTAL_PATT*NEURO_OUTPUT * sizeof(DATA))); //DEVICE ERROR MAT
    ERROR = (DATA*)malloc(sizeof(DATA)); // ERROR FOR CHECKING CORRECTNESS
    HANDLE_CUDA(hipMalloc((void **)&DEV_ERROR, sizeof(DATA))); //DEVICE ERROR
    // The reduction kernel accumulates with atomicAdd, so the accumulator
    // must start at zero.
    HANDLE_CUDA(hipMemset(DEV_ERROR, 0, sizeof(DATA)));
    /*----------------------------------------ERRORS END--------------------------------------------------*/
    /*++++---------------------------init INPUT_MAT and TARGET (HOST)-----------------------------++++*/
    nupl[0] = NEURO_INPUT;
    nupl[1] = NEURO_H_0;
    nupl[2] = NEURO_H_1;
    nupl[TOTAL_LAYER - 1] = NEURO_OUTPUT;
    TARGET = (DATA*)malloc(NEURO_OUTPUT*TOTAL_PATT * sizeof(DATA)); //TARGET OF THE PATTERNS
    for (int i = 0; i < TOTAL_PATT; i++) {
        for (int j = 0; j < NEURO_OUTPUT; j++) {
            TARGET[i*NEURO_OUTPUT + j] = (DATA)rand() / (DATA)RAND_MAX;
        }
    }
    /*INPUT_MAT is pinned memory (enables async copies in FeedAndBack)*/
    HANDLE_CUDA(hipHostMalloc(&INPUT_MAT, NEURO_INPUT * TOTAL_PATT * sizeof(DATA), 0));
    //DATA r;
    for (int i = 0; i < TOTAL_PATT; i++) {
        for (int j = 0; j < NEURO_INPUT; j++) {
            //r= rand() / (DATA)RAND_MAX;
            INPUT_MAT[i*NEURO_INPUT + j] = (DATA)rand() / (DATA)RAND_MAX;
            //htdm->H2H[i*NEURO_INPUT+ j] = r;
        }
    }
    /*---------------------------end init INPUT_MAT and TARGET (HOST)-------------------------*/
    /*++++---------------------------data structures on host and device-------------------------++++*/
    struct host_to_dev_mem *htdm = (struct host_to_dev_mem*)malloc(sizeof(struct host_to_dev_mem));
    struct dev_struct *dev_htdm;
    //Init weights and biases on host
    HOST_init_struct(htdm, nupl, TOTAL_LAYER);
    //Malloc the necessary space on device memory
    HANDLE_CUDA(hipMalloc((void **)&dev_htdm, sizeof(struct dev_struct)));
    /*---------------------------end data structures on host and device----------------------------*/
    /*++++---------------------------cuda array for texture-----------------------------++++*/
    hipArray* DEV_TARGET_CUDA;
    hipChannelFormatDesc channel;
    channel = hipCreateChannelDesc<DATA>();
    HANDLE_CUDA(hipMallocArray(&DEV_TARGET_CUDA, &channel, NEURO_OUTPUT, TOTAL_PATT));
    HANDLE_CUDA(hipMemcpyToArray(DEV_TARGET_CUDA, 0, 0, TARGET, NEURO_OUTPUT*TOTAL_PATT * sizeof(DATA), hipMemcpyHostToDevice));
    texreference_target.filterMode = hipFilterModePoint; //turn off the interpolation of hipFilterModeLinear
    texreference_target.addressMode[0] = hipAddressModeWrap;//works in normalized coordinates only
    texreference_target.addressMode[1] = hipAddressModeClamp;//works in both unnormalized and normalized coordinates
    HANDLE_CUDA(hipBindTextureToArray(texreference_target, DEV_TARGET_CUDA)); //Texture reference binding
    /*---------------------------end cuda array for texture-------------------------*/
    /*++++-----------Streams creation------------++++*/
    for (int i = 0; i < NSTREAMS; i++) {
        HANDLE_CUDA(hipStreamCreate(&streams[i]));
    }
    /*---------------end--streams creation-----------*/
    /*++++-----------------------------------FEEDFORWARD---AND---BACKPROPAGATION-------------------------------------------++++*/
    hipEvent_t start, stop;
    startTimer(&start, &stop);
    FeedAndBack(INPUT_MAT, htdm, dev_htdm, DEV_ERROR_MAT, DEV_ERROR, nupl, TOTAL_LAYER, streams, 1);
    stopAndPrint(&start, &stop);
    //hipDeviceSynchronize();//
    HANDLE_CUDA(hipMemcpy(ERROR, DEV_ERROR, sizeof(DATA), hipMemcpyDeviceToHost));
    printf("Reduced Error: %f\n", *ERROR);
    /*
    HANDLE_CUDA(hipMemcpy(ERROR_MAT, DEV_ERROR_MAT, TOTAL_PATT*NEURO_OUTPUT * sizeof(DATA), hipMemcpyDeviceToHost));
    printMat(ERROR_MAT, TOTAL_PATT, NEURO_OUTPUT);
    DATA red_host = errorReductionHost(ERROR_MAT, TOTAL_PATT, NEURO_OUTPUT);
    printf("host reduction error : %f\n", red_host);
    */
    /*-------------------------------------END---FEEDFORWARD---AND---BACKPROPAGATION-------------------------------------------*/
    /*++++--------------------------------deallocations------------------------------------++++*/
    //Host dealloc
    free(nupl);
    free(htdm);
    free(TARGET);
    free(ERROR_MAT);
    free(ERROR);
    hipFree(dev_htdm);
    hipFree(DEV_ERROR_MAT);
    hipFree(DEV_ERROR);
    hipHostFree(INPUT_MAT);
    //Unbinding texture
    hipUnbindTexture(texreference_target);
    //Free cuda array
    hipFreeArray(DEV_TARGET_CUDA);
    /*------------------------------------end deallocations------------------------------------*/
    return 0;
}
/*---------------------------------------------------------------------KERNEL--------------------------------------------------------------------------*/
/*DEVICE*/
/*++++---------------------------deviceReduceBlockAtomicKernel---------------------------++++*/
/*Warp reduction*/
// Sums 'val' across the lanes of a warp/wavefront via shuffle-down; after the
// loop the first lane holds the full warp sum (other lanes hold partials).
// Uses HIP's mask-less __shfl_down intrinsic.
__inline__ __device__ DATA warpReduceSum(DATA val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down(val, offset);
    return val;
}
/*Block reduction*/
// Sums 'val' across the whole thread block: each warp reduces via shuffles,
// warp leaders stash partials in shared memory, and warp 0 reduces those.
// The result is valid in thread 0 only.
// NOTE(review): assumes blockDim.x is a multiple of warpSize and that
// blockDim.x / warpSize <= 32 (size of 'shared') -- TODO confirm launch configs.
__inline__ __device__ DATA blockReduceSum(DATA val) {
    static __shared__ DATA shared[32];       // one partial per warp
    int lane = threadIdx.x % warpSize;       // lane id within the warp
    int wid = threadIdx.x / warpSize;        // warp id within the block
    val = warpReduceSum(val);
    if (lane == 0) shared[wid] = val;        // warp leaders publish partials
    __syncthreads();
    // Only the first blockDim.x/warpSize threads read valid partials.
    val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
    if (wid == 0) val = warpReduceSum(val);  // final reduce in warp 0
    return val;
}
/*Reducing large arrays--Blocks implementation*/
//When launching this kernel, prefer a linear grid of 8 blocks with 256 threads each --
//this limits shared-memory accesses compared with an implementation using 2 blocks of 1024 threads each.
//Beware of possible floating-point rounding effects introduced by the atomicAdds.
// Reduces 'in[0..N)' into *out: each thread accumulates a grid-stride
// partial sum, each block reduces its threads' partials, and thread 0 of
// every block folds the block total into *out with one atomicAdd.
// Caller must zero *out beforehand (main() does this with hipMemset).
__global__ void deviceReduceBlockAtomicKernel(DATA *in, DATA* out, int N) {
    DATA sum = 0.0f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x ; i < N ; i += blockDim.x * gridDim.x) {
        sum += in[i];
    }
    sum = blockReduceSum(sum);
    if (threadIdx.x == 0)
        atomicAdd(out, sum);
}
/*-------------------------------end--deviceReduceBlockAtomicKernel--------------------------*/
/*++++---------------------------MMMul--Feedforward-------------------------++++*/
/* h2h is the pointer to the portion of the global h2h matrix to process in
this phase (at each step the kernel that invokes this device function advances
the h2h pointer proportionally to patt_per_step, and h2h_dest likewise -- see below).
offset_y is the position along y (in the h2h, h2h_dest and, when relevant, error
matrices) for the current call to this __device__ function.
Delta computed for the output layer (later propagated by backpropagation) -->
DeltaO[p][k] = (Target[p][k] - Output[p][k]) * Output[p][k] * (1.0 - Output[p][k]) ;
*/
// Tiled matrix product for one feedforward step: computes
// h2h_dest = sigmoid(h2h * w + biases) over 'num_pattern' rows, using shared
// memory tiles of both operands. When col_w == NEURO_OUTPUT (last layer) it
// also writes the per-element squared error and the output-layer delta,
// reading targets from the bound 2D texture at row (dest_y + offset_y).
__device__ void MMMulDevPartialFeed(DATA *h2h, DATA *w, DATA *biases, DATA *h2h_dest, DATA *delta, DATA *error, unsigned int row_w, unsigned int col_w, unsigned int num_pattern, unsigned int offset_y) {
    int tx = threadIdx.x, ty = threadIdx.y;
    int block_x = blockIdx.x;
    int block_y = blockIdx.y;
    int block_dim = blockDim.x; // we assume square blocks
    int dest_x = block_x*block_dim + tx;
    int dest_y = block_y*block_dim + ty;
    int w_x = block_x*block_dim; // start block in w
    int h2h_y = block_y*block_dim*row_w; // start block in h2h
    int end_h2h = h2h_y + row_w - 1; // last block position in h2h
    int step_w = block_dim*col_w;
    int step_h2h = block_dim;
    DATA partial = 0.0f;
    int block_r_border = 0; // counter telling which tile iteration we are in
    int current_inc;
    int min;
    for (int wid = w_x, h2h_id = h2h_y; h2h_id <= end_h2h; wid += step_w, h2h_id += step_h2h) {
        block_r_border += block_dim;
        //__shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER+1]; +1 padding variant; the timing improvement could not yet be justified.
        __shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
        __shared__ DATA shared_h2h[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
        int t_index_w = wid + tx + ty*col_w;
        int t_index_h2h = h2h_id + tx + ty*row_w;
        // Beware of thread divergence (see CCC p.137): out-of-range tiles load 0.
        shared_h2h[ty][tx] = (t_index_h2h < num_pattern*row_w) ? (h2h[t_index_h2h]) : (0.0f);
        shared_w[ty][tx] = (t_index_w < col_w*row_w) ? (w[t_index_w]) : (0.0f);
        __syncthreads();
        // Clamp the inner-product length for the (possibly partial) last tile.
        current_inc = row_w - (block_r_border - block_dim);
        min = (current_inc < block_dim) ? (current_inc) : (block_dim);
#pragma unroll(2)
        for (int k = 0; k < min; k++) {
            partial += shared_h2h[ty][k] * shared_w[k][tx];
        }
        __syncthreads();
    }
    // Beware of thread divergence (see CCC p.137).
    if (dest_x < col_w && dest_y < num_pattern) {
        DATA out = (DATA)1.0 / (DATA)(1.0 + exp(-(partial + biases[dest_x])));
        h2h_dest[dest_y*col_w + dest_x] = out; //SIGMA (logistic activation)
        // If we are at the last (output) layer:
        if (col_w == NEURO_OUTPUT) {
            DATA target = tex2D(texreference_target, dest_x, dest_y + offset_y);
            // Write to the corresponding position of the ERROR matrix:
            /*0.5*(Target[p][k] - Output[p][k])*(Target[p][k] - Output[p][k])*/
            error[dest_y*col_w + dest_x] = 0.5*(target - out)*(target - out);
            // Write to the corresponding position of the DELTA matrix:
            /*(Target[p][k] - Output[p][k]) * Output[p][k] * (1.0 - Output[p][k])*/
            delta[dest_y*col_w + dest_x] = (target - out)*(out)*(1 - out);
        }
    }
}
/*patt_per_step is the number of patterns (when possible...) to process in each iteration over h2h*/
/*At every step this kernel advances the h2h pointer by num_patt_per_step*NEURO_L_L_1
(and h2h_dest likewise), checks that it is still within the h2h range, and computes
num_pattern (see above) from the patterns still remaining.
stream_offset_y is the y position (in the h2h and h2h_dest matrices) at which the current stream starts.
*/
//Where STREAMSIZE appears now, TOTAL_PATT was used before
// Feedforward kernel: walks this stream's STREAMSIZE patterns in chunks of
// patt_per_step rows, delegating each chunk to MMMulDevPartialFeed with the
// pointers advanced accordingly. Blocks whose y position exceeds the chunk
// size retire early.
__global__ void MMMulDevFeed(DATA *h2h, DATA *w, DATA *biases, DATA *h2h_dest, DATA *delta, DATA *error, unsigned int row_w, unsigned int col_w, unsigned int patt_per_step, unsigned int stream_offset_y) {
    unsigned int current_patts;
    unsigned int remaining_patts;
    const int pos_block_y = blockIdx.y*blockDim.x; //Position of the current block within the grid along y
    //We assume square blocks (blockDim.x = blockDim.y)
    for (unsigned int y = 0; y < STREAMSIZE; y += patt_per_step) {
        remaining_patts = STREAMSIZE - y;
        current_patts = (remaining_patts < patt_per_step) ? (remaining_patts) : (patt_per_step);
        if (pos_block_y >= current_patts) { return; }
        MMMulDevPartialFeed(h2h + y*row_w, w, biases, h2h_dest + y*col_w, delta + y*NEURO_OUTPUT, error + y*NEURO_OUTPUT, row_w, col_w, current_patts, stream_offset_y + y);
    }
}
/*-------------------------------end--MMMul--Feedforward------------------------*/
/*++++---------------------------MMMul--BackPropagation-------------------------++++*/
// One backpropagation step: computes delta_l_1 = (delta_l * w^T) ⊙ h2h_l_1 ⊙
// (1 - h2h_l_1) over 'num_pattern' rows, tiling delta_l and w in shared
// memory. w has shape row_w x col_w and is read as if transposed.
__device__ void MMMulDevPartialBack(DATA *delta_l, DATA *w, DATA *delta_l_1, DATA *h2h_l_1, unsigned int row_w, unsigned int col_w, unsigned int num_pattern) {
    int tx = threadIdx.x, ty = threadIdx.y;
    int block_x = blockIdx.x;
    int block_y = blockIdx.y;
    int block_dim = blockDim.x; // we assume square blocks
    int dest_x = block_x*block_dim + tx;
    int dest_y = block_y*block_dim + ty;
    // We must walk matrix w by rows (w is treated as if transposed -- see diary, Feb 16)
    int w_y = block_x*block_dim*col_w; // start block in w
    int delta_l_y = block_y*block_dim*col_w; // start block in delta_l_y
    int end_delta_l = delta_l_y + col_w - 1; // last block position in h2h
    int step_w_y = block_dim;
    int step_delta_l = block_dim;
    DATA partial = 0.0f;
    int block_r_border = 0; // counter telling which tile iteration we are in
    int current_inc;
    int min;
    for (int wid = w_y, deltaid = delta_l_y; deltaid <= end_delta_l; wid += step_w_y, deltaid += step_delta_l) {
        block_r_border += block_dim;
        __shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
        __shared__ DATA shared_delta_l[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
        int t_index_w = wid + tx + ty*col_w;
        int t_index_delta_l = deltaid + tx + ty*col_w;
        // Beware of thread divergence (see CCC p.137): out-of-range tiles load 0.
        shared_delta_l[ty][tx] = (t_index_delta_l < num_pattern*col_w) ? (delta_l[t_index_delta_l]) : (0.0f);
        // Store the weight sub-matrix transposed in shared memory (note that
        // this avoids bank conflicts):
        shared_w[tx][ty] = (t_index_w < row_w*col_w) ? (w[t_index_w]) : (0.0f);
        __syncthreads();
        // Clamp the inner-product length for the (possibly partial) last tile.
        current_inc = col_w - (block_r_border - block_dim);
        min = (current_inc < block_dim) ? (current_inc) : (block_dim);
#pragma unroll(2)
        for (int k = 0; k < min; k++) {
            partial += shared_delta_l[ty][k] * shared_w[k][tx];
        }
        __syncthreads();
    }
    // Beware of thread divergence (see CCC p.137).
    if (dest_x < row_w && dest_y < num_pattern) {
        //Backpropagate the delta to the previous layer
        DATA h2h_l_1_target = h2h_l_1[dest_y*row_w + dest_x];
        delta_l_1[dest_y*row_w + dest_x] = partial*h2h_l_1_target*(1 - h2h_l_1_target);
    }
}
// Backpropagation kernel: walks this stream's STREAMSIZE patterns in chunks
// of patt_per_step rows, delegating each chunk to MMMulDevPartialBack with
// the pointers advanced accordingly. Blocks past the chunk retire early.
__global__ void MMMulDevBack(DATA *delta_l, DATA *w, DATA *delta_l_1, DATA *h2h_l_1, unsigned int row_w, unsigned int col_w, unsigned int patt_per_step) {
    unsigned int current_patts;
    unsigned int remaining_patts;
    const int pos_block_y = blockIdx.y*blockDim.x; //Position of the current block within the grid along y
    //We assume square blocks (blockDim.x = blockDim.y)
    for (unsigned int y = 0; y < STREAMSIZE; y += patt_per_step) {
        remaining_patts = STREAMSIZE - y;
        current_patts = (remaining_patts < patt_per_step) ? (remaining_patts) : (patt_per_step);
        if (pos_block_y >= current_patts) { return; }
        MMMulDevPartialBack(delta_l + y*col_w, w, delta_l_1 + y*row_w, h2h_l_1 + y*row_w, row_w, col_w, current_patts);
    }
}
/*-------------------------------end--MMMul--BackPropagation------------------------*/
/*HOST*/
/*FEEDFORWARD AND BACKPROPAGATION PHASES -- THE INPUT IS TRANSMITTED VIA THE NETWORK AND IN BACK PROPAGATED*/
void FeedAndBack(DATA *INPUT, struct host_to_dev_mem * htdm, struct dev_struct *dev_htdm, DATA *dev_error_mat, DATA *dev_error, int *nupl, int layers, hipStream_t *streams, BOOL first_epoch) {
/*
Runs one full feedforward pass followed by backpropagation of the output deltas,
splitting the TOTAL_PATT patterns across NSTREAMS HIP streams (STREAMSIZE patterns each).
INPUT         : host input matrix, TOTAL_PATT x nupl[0] (pinned, so the async copies below are legal).
htdm          : host-side struct holding weights/biases and the element-offset tables.
dev_htdm      : device mirror of htdm; weights/biases are uploaded here only on the first epoch.
dev_error_mat : device per-pattern/per-output error matrix (TOTAL_PATT x nupl[layers-1]).
dev_error     : device scalar the final reduction atomically accumulates into
                (assumed zeroed by the caller before the call -- TODO confirm).
nupl          : neurons-per-layer array of length `layers`.
streams       : array of NSTREAMS already-created HIP streams.
first_epoch   : when true, also uploads weights/biases and the input patterns.
No host synchronization happens here; the caller must sync (or do a blocking copy)
before reading dev_error.
*/
//hipEvent_t start, stop;
//Grid setting
dim3 grid, block;
unsigned int patt_per_step;
//Useful pointers
DATA *h2h, *w, *bias, *h2h_dest, *delta, *error;
//delta_l: delta the information starts from; delta_l_1: delta it is propagated back into
DATA *delta_l, *delta_l_1;
//Per-stream offset, expressed in patterns, into the global packed matrices
int offset;
//startTimer(&start, &stop);
if (first_epoch) {
//Partial struct copy: only the leading (GLOBAL_W_SIZE + GLOBAL_BIAS_SIZE) DATA elements
//are sent, i.e. the WeightH2H and BiasH2H members, which are the first two fields of
//both struct types and therefore share the same byte layout.
HANDLE_CUDA(hipMemcpy(dev_htdm, htdm, (GLOBAL_BIAS_SIZE + GLOBAL_W_SIZE) * sizeof(DATA), hipMemcpyHostToDevice));
}
//stopAndPrint(&start, &stop);
for (int i = 0; i < NSTREAMS; i++) {
//See diary entry of Feb 15 (feedforward step 1, considerations)
block.x = gs.block[0];
block.y = gs.block[0];
//Enough blocks along x to cover the nupl[1] destination neurons (ceil-div)
grid.x = (nupl[1] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[0] / grid.x, 1); //Prevent grid.y from becoming 0
//Patterns processed per kernel-internal iteration (kernel loops over STREAMSIZE in these steps)
patt_per_step = grid.y * block.y;
offset = i*STREAMSIZE;
//Set pointers
h2h = dev_htdm->H2H + offset*nupl[0];
w = dev_htdm->WeightH2H;
bias = dev_htdm->BiasH2H;
h2h_dest = dev_htdm->H2H + htdm->matrix_H2H_index[0][1] + offset*nupl[1];
//delta/error point at the OUTPUT layer's slice; they are only written during the last
//feedforward step (when col_w == NEURO_OUTPUT inside the kernel)
delta = dev_htdm->Delta + htdm->matrix_DELTA_index[0][layers - 2] + offset*nupl[layers - 1];
error = dev_error_mat + offset*nupl[layers - 1];
//Pointers set up
if (first_epoch) {
//Upload this stream's input slice; overlaps with other streams' work (INPUT is pinned)
HANDLE_CUDA(hipMemcpyAsync(h2h, INPUT + offset*nupl[0], nupl[0] * STREAMSIZE * sizeof(DATA), hipMemcpyHostToDevice, streams[i]));
}
//First Feedforward step:
MMMulDevFeed << <grid, block, 0, streams[i] >> > (h2h, w, bias, h2h_dest, delta, error, nupl[0], nupl[1], patt_per_step, offset);
//Hidden-to-hidden feedforward steps (layers 1 .. layers-3)
for (int l = 1; l < (layers - 2); l++) {
block.x = gs.block[l];
block.y = gs.block[l];
grid.x = (nupl[l + 1] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[l] / grid.x, 1); //Prevent grid.y from becoming 0
patt_per_step = grid.y * block.y;
//Set pointers
h2h = dev_htdm->H2H + htdm->matrix_H2H_index[0][l] + offset*nupl[l];
w = dev_htdm->WeightH2H + htdm->matrix_WB_index[0][l];
bias = dev_htdm->BiasH2H + htdm->matrix_WB_index[1][l];
h2h_dest = dev_htdm->H2H + htdm->matrix_H2H_index[0][l + 1] + offset*nupl[l + 1];
//Delta and error already set up
//Pointers set up
MMMulDevFeed << <grid, block, 0, streams[i] >> > (h2h, w, bias, h2h_dest, delta, error, nupl[l], nupl[l + 1], patt_per_step, offset);
}
//Last Feedforward step: also fills the error and output-delta matrices in-kernel
block.x = gs.block[layers - 2];
block.y = gs.block[layers - 2];
grid.x = (nupl[layers - 1] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[layers - 2] / grid.x, 1); //Prevent grid.y from becoming 0
patt_per_step = grid.y * block.y;
//Set pointers
h2h = dev_htdm->H2H + htdm->matrix_H2H_index[0][layers - 2] + offset*nupl[layers - 2];
w = dev_htdm->WeightH2H + htdm->matrix_WB_index[0][layers - 2];
bias = dev_htdm->BiasH2H + htdm->matrix_WB_index[1][layers - 2];
h2h_dest = dev_htdm->H2H + htdm->matrix_H2H_index[0][layers - 1] + offset*nupl[layers - 1];
//Delta and error already set up
//Pointers set up
MMMulDevFeed << <grid, block, 0, streams[i] >> > (h2h, w, bias, h2h_dest, delta, error, nupl[layers - 2], nupl[layers - 1], patt_per_step, offset);
//BackPropagate for all streams: walk the delta matrices from the output back to layer 1
for (int delta_index = (layers - 2); delta_index > 0; delta_index--) {
block.x = gs.block[delta_index];
block.y = gs.block[delta_index];
grid.x = (nupl[delta_index] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[delta_index] / grid.x, 1); //Prevent grid.y from becoming 0
patt_per_step = grid.y * block.y;
//Set pointers
h2h = dev_htdm->H2H + htdm->matrix_H2H_index[0][delta_index] + offset*nupl[delta_index];
w = dev_htdm->WeightH2H + htdm->matrix_WB_index[0][delta_index];
//delta matrix d has row width nupl[d+1], hence the offset*nupl[delta_index + 1] stride
delta_l = dev_htdm->Delta + htdm->matrix_DELTA_index[0][delta_index] + offset*nupl[delta_index + 1];
delta_l_1 = dev_htdm->Delta + htdm->matrix_DELTA_index[0][delta_index - 1] + offset*nupl[delta_index];
//Pointers set up
MMMulDevBack << <grid, block, 0, streams[i] >> > (delta_l, w, delta_l_1, h2h, nupl[delta_index], nupl[delta_index + 1], patt_per_step);
}
}
//**HERE**
//Error reduction (default stream)
//NOTE(review): ordering after the per-stream kernels relies on legacy default-stream
//semantics (streams were created with the blocking flag) -- confirm if stream flags change.
deviceReduceBlockAtomicKernel << <OPTIMUM_BLOCK_NUM * 2, BLOCK_SIDE*BLOCK_SIDE >> > (dev_error_mat, dev_error, TOTAL_PATT*nupl[layers-1]);
}
/*UTILITY FUNCTIONS*/
/*Host reference feedforward: propagates INPUT through the three weight layers,
storing each layer's activations in H2H[l]. Used only to validate device results.*/
void HOST_feedforward(DATA *INPUT, DATA **W, DATA **BIAS, DATA **H2H, int *nupl) {
    DATA *layer_in = INPUT;   // activations feeding the current layer
    for (int l = 0; l < 3; l++) {
        MMMulHost(layer_in, W[l], BIAS[l], H2H[l], TOTAL_PATT, nupl[l], nupl[l + 1]);
        layer_in = H2H[l];    // this layer's output feeds the next one
    }
}
/*Print a matrix*/
/*Dump a rows x cols matrix to stdout, one "ROW i : {...}" line per row.*/
void printMat(DATA *mat, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        const DATA *row = mat + r * cols;   // start of the current row
        printf("ROW %d : {", r);
        for (int c = 0; c < cols; c++) {
            printf("%f - ", row[c]);
        }
        printf("}");
        printf("\n\n");
    }
    printf("\n\n");
}
/*Print error matrix on host (for checking correctness of device)*/
/*Print the host-side squared-error matrix 0.5*(target-output)^2, element by element
(for checking correctness of the device error matrix).*/
void printErrorMat(DATA *TARGET, DATA *OUTPUT_MAT, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        printf("ROW %d : {", r);
        for (int c = 0; c < cols; c++) {
            // single-precision difference, promoted to double by the 0.5 literal
            DATA diff = TARGET[r*cols + c] - OUTPUT_MAT[r*cols + c];
            printf("%f - ", 0.5*diff*diff);
        }
        printf("}");
        printf("\n\n");
    }
    printf("\n\n");
}
/*On host multiplication*/
/*Host reference matrix multiply with bias and sigmoid activation:
H2H_RES = sigmoid(H2H * W + BIAS), where H2H is row_H2H x col_H2H and W is col_H2H x col_W.
Accumulation order matches the device kernels' per-element dot product.*/
void MMMulHost(DATA *H2H, DATA *W, DATA *BIAS, DATA *H2H_RES, int row_H2H, int col_H2H, int col_W) {
    for (int r = 0; r < row_H2H; r++) {
        const DATA *in_row = H2H + r * col_H2H;   // current input row
        DATA *out_row = H2H_RES + r * col_W;      // current output row
        for (int c = 0; c < col_W; c++) {
            DATA acc = 0.0;
            for (int k = 0; k < col_H2H; k++) {
                acc += in_row[k] * W[k*col_W + c];
            }
            // logistic sigmoid applied to the biased dot product
            out_row[c] = (DATA)1.0 / (DATA)(1.0 + exp(-(acc + BIAS[c])));
        }
    }
}
/*Check device*/
/*Element-wise comparison of two rows x cols matrices with tolerance MAX_ERR.
Returns 1 when every pair differs by less than MAX_ERR; otherwise prints the
first mismatching position and returns 0.*/
BOOL matsAreEquals(DATA *A, DATA *B, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            int idx = r*cols + c;
            float err = fabs(A[idx] - B[idx]);
            //printf("Error in i=%d,j=%d: %f\n", r, c, err);
            if (err >= MAX_ERR) {
                printf("row: %d, col: %d\n", r, c);
                return 0;
            }
        }
    }
    return 1;
}
/*Check device reduction*/
/*Host reference reduction: naive left-to-right sum of all rows*cols elements,
used to sanity-check the device atomic reduction (small float drift is expected).*/
DATA errorReductionHost(DATA *error_mat, int rows, int cols) {
    const int n = rows * cols;
    DATA total = 0.0f;
    for (int idx = 0; idx < n; idx++) {
        total += error_mat[idx];
    }
    return total;
}
/*ALLOCATION FUNCTIONS*/
/*init struct on host*/
void HOST_init_struct(struct host_to_dev_mem* htdm, int* nupl, int layers) {
/*
Builds the element-offset tables into the packed buffers of htdm and seeds every
weight and bias with rand()/RAND_MAX values in [0, 1].
Matrix conventions, as implied by the index arithmetic below:
  H2H   matrix l : TOTAL_PATT x nupl[l] activations   (offsets grow by nupl[l-1]*TOTAL_PATT).
  DELTA matrix l : TOTAL_PATT x nupl[l+1] deltas      (offsets grow by nupl[l]*TOTAL_PATT).
  W     matrix l : nupl[l] x nupl[l+1] weights; BIAS vector l: nupl[l+1] entries.
rand() is not re-seeded here, so the caller controls reproducibility via srand().
*/
int prev_sum[MATRIX_NUMBER_STRUCT];
//Matrix 0 of every table starts at offset 0 of its packed buffer
htdm->matrix_H2H_index[0][0] = 0;
htdm->matrix_DELTA_index[0][0] = 0;
htdm->matrix_WB_index[0][0] = 0;
htdm->matrix_WB_index[1][0] = 0;
//TODO: add error checks on the mallocs
/*the offset of the matrix at the current layer depends on the previous layers' sizes*/
for (int layer = 1; layer<(layers - 1); layer++) {
prev_sum[0] = htdm->matrix_H2H_index[0][layer - 1];
prev_sum[1] = htdm->matrix_DELTA_index[0][layer - 1];
prev_sum[2] = htdm->matrix_WB_index[0][layer - 1];
prev_sum[3] = htdm->matrix_WB_index[1][layer - 1];
//Each offset = previous matrix's offset + previous matrix's element count
htdm->matrix_H2H_index[0][layer] = nupl[layer - 1] * TOTAL_PATT + prev_sum[0];
htdm->matrix_DELTA_index[0][layer] = nupl[layer] * TOTAL_PATT + prev_sum[1];
htdm->matrix_WB_index[0][layer] = nupl[layer - 1] * nupl[layer] + prev_sum[2];
htdm->matrix_WB_index[1][layer] = nupl[layer] + prev_sum[3];
//Random init of weight matrix `layer` (nupl[layer] x nupl[layer+1]) and its bias vector
for (int i = 0; i < nupl[layer]; i++) {
for (int j = 0; j < nupl[layer + 1]; j++) {
htdm->WeightH2H[htdm->matrix_WB_index[0][layer] + i*nupl[layer + 1] + j] = (DATA)rand() / (DATA)RAND_MAX;
//NOTE(review): each bias entry j is rewritten for every i (redundant rand() calls);
//removing the redundancy would change the rand() sequence, so it is left as-is.
htdm->BiasH2H[htdm->matrix_WB_index[1][layer] + j] = (DATA)rand() / (DATA)RAND_MAX;
}
}
}
//H2H has one more matrix than the others: the output layer's activations
prev_sum[0] = htdm->matrix_H2H_index[0][layers - 2];
htdm->matrix_H2H_index[0][layers - 1] = nupl[layers - 2] * TOTAL_PATT + prev_sum[0];
//Weight matrix 0 (nupl[0] x nupl[1], offset 0) and bias vector 0 are initialized last
for (int i = 0; i < nupl[0]; i++) {
for (int j = 0; j < nupl[1]; j++) {
htdm->WeightH2H[i*nupl[1] + j] = (DATA)rand() / (DATA)RAND_MAX;
htdm->BiasH2H[j] = (DATA)rand() / (DATA)RAND_MAX;
}
}
}
//DO NOT DELETE !!! PASTE INTO FeedAndBack AT THE **HERE** MARKER TO RUN A CORRECTNESS CHECK !!!
//REMEMBER TO UNCOMMENT THE 'r' IN MAIN
/*
hipDeviceSynchronize();
DATA **H2H_RES = (DATA**)malloc(TOTAL_LAYER * sizeof(DATA*));
for (int i = 0; i < TOTAL_LAYER; i++) {
H2H_RES[i] = (DATA*)malloc(TOTAL_PATT*nupl[i] * sizeof(DATA));
}
for (int l = 0; l < (layers - 1); l++) {
HANDLE_CUDA(hipMemcpy(htdm->H2H+ htdm->matrix_H2H_index[0][l+1],dev_htdm->H2H + htdm->matrix_H2H_index[0][l+1], (TOTAL_PATT)* nupl[l+1] * sizeof(DATA), hipMemcpyDeviceToHost));
MMMulHost( htdm->H2H + htdm->matrix_H2H_index[0][l], htdm->WeightH2H + htdm->matrix_WB_index[0][l] , htdm->BiasH2H + htdm->matrix_WB_index[1][l], H2H_RES[l + 1], TOTAL_PATT, nupl[l], nupl[l + 1]);
BOOL b = matsAreEquals(htdm->H2H+ htdm->matrix_H2H_index[0][l+1], H2H_RES[l + 1], TOTAL_PATT, nupl[l + 1]);
printf("layer%d %d\n",l, b);
}*/ | 9b9a726b7931d39ee62537d64f023d92ccaef2b4.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define DATA float
#define BOOL int
#define MAX_ERR 1e-5
#define MAX(a,b) ((a)>(b)?(a):(b))
//Grid features
//Leggere 15 febbraio del diario (passo 1 del feedforward, considerazioni)
#define OPTIMUM_BLOCK_NUM 4
#define BLOCK_SIDE 16
#define OPTIMUM_BLOCK_NUM_FIRST_LAYER 2
#define BLOCK_SIDE_FIRST_LAYER 32
/*Struct Grid Settings*/
typedef struct grid_settings {
unsigned int grid[3];
unsigned int block[3];
}grid_settings;
grid_settings gs = { { OPTIMUM_BLOCK_NUM_FIRST_LAYER, OPTIMUM_BLOCK_NUM, OPTIMUM_BLOCK_NUM },{ BLOCK_SIDE_FIRST_LAYER,BLOCK_SIDE,BLOCK_SIDE } };
//Network features
#define NEURO_INPUT 784 //#neurons of input layer
#define NEURO_H_0 56 //#neurons of first hidden layer
#define NEURO_H_1 28 //#neurons of second hidden layer
#define NEURO_OUTPUT 10 //#neurons of output layer
#define TOTAL_PATT 60000 //#total patterns
#define NUM_HIDDEN 2 //#hidden layers
#define TOTAL_LAYER 4 //#of layers
//Streams Settings
#define NSTREAMS 3
#define STREAMSIZE TOTAL_PATT/NSTREAMS
/*Struct One Copy HostToDev -- Contains weights and bias*/
//struct features
#define MATRIX_NUMBER_STRUCT 4 //#matrix to copy to Device(in struct)
#define GLOBAL_H_SIZE TOTAL_PATT * (NEURO_INPUT + NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT)
#define GLOBAL_DELTA_SIZE TOTAL_PATT * (NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT)
#define GLOBAL_W_SIZE (NEURO_INPUT*NEURO_H_0) + (NEURO_H_0*NEURO_H_1) + (NEURO_H_1*NEURO_OUTPUT)
#define GLOBAL_BIAS_SIZE NEURO_H_0 + NEURO_H_1 + NEURO_OUTPUT
typedef struct host_to_dev_mem {
DATA WeightH2H[GLOBAL_W_SIZE];
DATA BiasH2H[GLOBAL_BIAS_SIZE];
DATA Delta[GLOBAL_DELTA_SIZE];
DATA H2H[GLOBAL_H_SIZE];
int matrix_WB_index[MATRIX_NUMBER_STRUCT - 2][TOTAL_LAYER - 1];//INDEX for padding in Weight & Bias
int matrix_DELTA_index[MATRIX_NUMBER_STRUCT - 3][TOTAL_LAYER - 1];//INDEX for padding in Delta
int matrix_H2H_index[MATRIX_NUMBER_STRUCT - 3][TOTAL_LAYER];//INDEX for padding in H2H
} host_to_dev_mem;
typedef struct dev_struct {
DATA WeightH2H[GLOBAL_W_SIZE];
DATA BiasH2H[GLOBAL_BIAS_SIZE];
DATA Delta[GLOBAL_DELTA_SIZE];
DATA H2H[GLOBAL_H_SIZE];
} dev_struct;
//Texture reference (FOR TARGET MATRIX)
texture<DATA, 2, cudaReadModeElementType> texreference_target;
/*UTILITIES*/
static void HandleCuda(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_CUDA( err ) (HandleCuda( err, __FILE__, __LINE__ ))
void startTimer(cudaEvent_t *start, cudaEvent_t *stop) {
HANDLE_CUDA(cudaEventCreate(start));
HANDLE_CUDA(cudaEventCreate(stop));
HANDLE_CUDA(cudaEventRecord(*start, 0));
}
void stopAndPrint(cudaEvent_t *start, cudaEvent_t *stop) {
HANDLE_CUDA(cudaEventRecord(*stop, 0));
HANDLE_CUDA(cudaEventSynchronize(*stop));
float time = 0.0f;
HANDLE_CUDA(cudaEventElapsedTime(&time, *start, *stop));
printf("Elapsed Time: %f milliseconds\n", time);
HANDLE_CUDA(cudaEventDestroy(*start));
HANDLE_CUDA(cudaEventDestroy(*stop));
}
/*DEVICE*/
/*deviceReduceBlockAtomicKernel*/
__inline__ __device__ DATA warpReduceSum(DATA);
__inline__ __device__ DATA blockReduceSum(DATA);
__global__ void deviceReduceBlockAtomicKernel(DATA *, DATA*, int);
/*MMMul(for feedforward)*/
__device__ void MMMulDevPartialFeed(DATA *, DATA *, DATA *, DATA *, DATA*, DATA *, unsigned int, unsigned int, unsigned int, unsigned int);
__global__ void MMMulDevFeed(DATA *, DATA *, DATA *, DATA *, DATA *, DATA*, unsigned int, unsigned int, unsigned int, unsigned int);
/*MMMul(for backpropagation)*/
__device__ void MMMulDevPartialBack(DATA *, DATA *, DATA *, DATA *, unsigned int, unsigned int, unsigned int);
__global__ void MMMulDevBack(DATA *, DATA *, DATA *, DATA *, unsigned int, unsigned int, unsigned int);
/*HOST*/
void FeedAndBack(DATA *, struct host_to_dev_mem *, struct dev_struct *, DATA *, DATA *, int *, int, cudaStream_t *, BOOL);
void HOST_feedforward(DATA *, DATA **, DATA **, DATA **, int *);
void printMat(DATA *, int, int);
void printErrorMat(DATA *, DATA*, int, int);
void MMMulHost(DATA *, DATA *, DATA *, DATA *, int, int, int);
BOOL matsAreEquals(DATA *, DATA *, int, int);
DATA errorReductionHost(DATA *, int, int);
/*HOST ALLOCATION AND INITIALIZATION*/
void HOST_init_struct(struct host_to_dev_mem*, int*, int);
/*----------------------------------------------------------------------MAIN---------------------------------------------------------------------------*/
int main(void) {
DATA *INPUT_MAT, *ERROR_MAT, *DEV_ERROR_MAT;
DATA *ERROR, *DEV_ERROR;
DATA *TARGET;
cudaStream_t streams[NSTREAMS];
int *nupl = (int*)malloc(TOTAL_LAYER * sizeof(int));
/*++++------------------------------------ERRORS--------------------------------------------------++++*/
ERROR_MAT = (DATA*)malloc(TOTAL_PATT*NEURO_OUTPUT * sizeof(DATA)); // ERROR FOR CHECKING CORRECTNESS
HANDLE_CUDA(cudaMalloc((void **)&DEV_ERROR_MAT, TOTAL_PATT*NEURO_OUTPUT * sizeof(DATA))); //DEVICE ERROR MAT
ERROR = (DATA*)malloc(sizeof(DATA)); // ERROR FOR CHECKING CORRECTNESS
HANDLE_CUDA(cudaMalloc((void **)&DEV_ERROR, sizeof(DATA))); //DEVICE ERROR
HANDLE_CUDA(cudaMemset(DEV_ERROR, 0, sizeof(DATA)));
/*----------------------------------------ERRORS END--------------------------------------------------*/
/*++++---------------------------init INPUT_MAT and TARGET (HOST)-----------------------------++++*/
nupl[0] = NEURO_INPUT;
nupl[1] = NEURO_H_0;
nupl[2] = NEURO_H_1;
nupl[TOTAL_LAYER - 1] = NEURO_OUTPUT;
TARGET = (DATA*)malloc(NEURO_OUTPUT*TOTAL_PATT * sizeof(DATA)); //TARGET OF THE PATTERNS
for (int i = 0; i < TOTAL_PATT; i++) {
for (int j = 0; j < NEURO_OUTPUT; j++) {
TARGET[i*NEURO_OUTPUT + j] = (DATA)rand() / (DATA)RAND_MAX;
}
}
/*INPUT_MAT is pinned memory*/
HANDLE_CUDA(cudaHostAlloc(&INPUT_MAT, NEURO_INPUT * TOTAL_PATT * sizeof(DATA), 0));
//DATA r;
for (int i = 0; i < TOTAL_PATT; i++) {
for (int j = 0; j < NEURO_INPUT; j++) {
//r= rand() / (DATA)RAND_MAX;
INPUT_MAT[i*NEURO_INPUT + j] = (DATA)rand() / (DATA)RAND_MAX;
//htdm->H2H[i*NEURO_INPUT+ j] = r;
}
}
/*---------------------------end init INPUT_MAT and TARGET (HOST)-------------------------*/
/*++++---------------------------data structures on host and device-------------------------++++*/
struct host_to_dev_mem *htdm = (struct host_to_dev_mem*)malloc(sizeof(struct host_to_dev_mem));
struct dev_struct *dev_htdm;
//Init weights and biases on host
HOST_init_struct(htdm, nupl, TOTAL_LAYER);
//Malloc the necessary space on device memory
HANDLE_CUDA(cudaMalloc((void **)&dev_htdm, sizeof(struct dev_struct)));
/*---------------------------end data structures on host and device----------------------------*/
/*++++---------------------------cuda array for texture-----------------------------++++*/
cudaArray* DEV_TARGET_CUDA;
cudaChannelFormatDesc channel;
channel = cudaCreateChannelDesc<DATA>();
HANDLE_CUDA(cudaMallocArray(&DEV_TARGET_CUDA, &channel, NEURO_OUTPUT, TOTAL_PATT));
HANDLE_CUDA(cudaMemcpyToArray(DEV_TARGET_CUDA, 0, 0, TARGET, NEURO_OUTPUT*TOTAL_PATT * sizeof(DATA), cudaMemcpyHostToDevice));
texreference_target.filterMode = cudaFilterModePoint; //turn off the interpolation of cudaFilterModeLinear
texreference_target.addressMode[0] = cudaAddressModeWrap;//works in normalized coordinates only
texreference_target.addressMode[1] = cudaAddressModeClamp;//works in both unnormalized and normalized coordinates
HANDLE_CUDA(cudaBindTextureToArray(texreference_target, DEV_TARGET_CUDA)); //Texture reference binding
/*---------------------------end cuda array for texture-------------------------*/
/*++++-----------Streams creation------------++++*/
for (int i = 0; i < NSTREAMS; i++) {
HANDLE_CUDA(cudaStreamCreate(&streams[i]));
}
/*---------------end--streams creation-----------*/
/*++++-----------------------------------FEEDFORWARD---AND---BACKPROPAGATION-------------------------------------------++++*/
cudaEvent_t start, stop;
startTimer(&start, &stop);
FeedAndBack(INPUT_MAT, htdm, dev_htdm, DEV_ERROR_MAT, DEV_ERROR, nupl, TOTAL_LAYER, streams, 1);
stopAndPrint(&start, &stop);
//cudaDeviceSynchronize();//
HANDLE_CUDA(cudaMemcpy(ERROR, DEV_ERROR, sizeof(DATA), cudaMemcpyDeviceToHost));
printf("Reduced Error: %f\n", *ERROR);
/*
HANDLE_CUDA(cudaMemcpy(ERROR_MAT, DEV_ERROR_MAT, TOTAL_PATT*NEURO_OUTPUT * sizeof(DATA), cudaMemcpyDeviceToHost));
printMat(ERROR_MAT, TOTAL_PATT, NEURO_OUTPUT);
DATA red_host = errorReductionHost(ERROR_MAT, TOTAL_PATT, NEURO_OUTPUT);
printf("host reduction error : %f\n", red_host);
*/
/*-------------------------------------END---FEEDFORWARD---AND---BACKPROPAGATION-------------------------------------------*/
/*++++--------------------------------deallocations------------------------------------++++*/
//Host dealloc
free(nupl);
free(htdm);
free(TARGET);
free(ERROR_MAT);
free(ERROR);
cudaFree(dev_htdm);
cudaFree(DEV_ERROR_MAT);
cudaFree(DEV_ERROR);
cudaFreeHost(INPUT_MAT);
//Unbinding texture
cudaUnbindTexture(texreference_target);
//Free cuda array
cudaFreeArray(DEV_TARGET_CUDA);
/*------------------------------------end deallocations------------------------------------*/
return 0;
}
/*---------------------------------------------------------------------KERNEL--------------------------------------------------------------------------*/
/*DEVICE*/
/*++++---------------------------deviceReduceBlockAtomicKernel---------------------------++++*/
/*Warp reduction*/
__inline__ __device__ DATA warpReduceSum(DATA val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
/*Block reduction*/
__inline__ __device__ DATA blockReduceSum(DATA val) {
static __shared__ DATA shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid == 0) val = warpReduceSum(val);
return val;
}
/*Reducing large arrays--Blocks implementation*/
//Nella chiamata di questo kernel č meglio usare una griglia lineare di 8 blocchi con 256 threads ciascuno --
//In tal modo vengono limitati gli accessi alla shared memory rispetto all'implementazione con 2 blocchi da 1024 threads ciascuno
//Attenzione ai possibili arrotondamenti di numeri a virgola mobile dovuti alle atomicAdd.
__global__ void deviceReduceBlockAtomicKernel(DATA *in, DATA* out, int N) {
DATA sum = 0.0f;
for (int i = blockIdx.x * blockDim.x + threadIdx.x ; i < N ; i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = blockReduceSum(sum);
if (threadIdx.x == 0)
atomicAdd(out, sum);
}
/*-------------------------------end--deviceReduceBlockAtomicKernel--------------------------*/
/*++++---------------------------MMMul--Feedforward-------------------------++++*/
/* h2h č il puntatore alla porzione dell'h2h globale da considerare in questa fase
(ad ogni passo il kernel che invoca questo device incrementa il puntatore h2h
in modo proporzionale al patt_per_step (e similmente h2h_dest) (vedi sotto)).
offset_y č la posizione considerata lungo le y (nelle matrici h2h, h2h_dest ed eventualmente error) durante la chiamata corrente a __device__.
Delta č calcolato per l'output layer (propagato poi con backpropagation) --> DeltaO[p][k] = (Target[p][k] - Output[p][k]) * Output[p][k] * (1.0 - Output[p][k]) ;
*/
__device__ void MMMulDevPartialFeed(DATA *h2h, DATA *w, DATA *biases, DATA *h2h_dest, DATA *delta, DATA *error, unsigned int row_w, unsigned int col_w, unsigned int num_pattern, unsigned int offset_y) {
int tx = threadIdx.x, ty = threadIdx.y;
int block_x = blockIdx.x;
int block_y = blockIdx.y;
int block_dim = blockDim.x; // assumiamo che i blocchi siano quadrati
int dest_x = block_x*block_dim + tx;
int dest_y = block_y*block_dim + ty;
int w_x = block_x*block_dim; // start block in w
int h2h_y = block_y*block_dim*row_w; // start block in h2h
int end_h2h = h2h_y + row_w - 1; // last block position in h2h
int step_w = block_dim*col_w;
int step_h2h = block_dim;
DATA partial = 0.0f;
int block_r_border = 0; // contatore che indica in che iterazione dei blocchi ci troviamo
int current_inc;
int min;
for (int wid = w_x, h2h_id = h2h_y; h2h_id <= end_h2h; wid += step_w, h2h_id += step_h2h) {
block_r_border += block_dim;
//__shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER+1]; Non possiamo ancora giustificare il miglioramento nei tempi.
__shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
__shared__ DATA shared_h2h[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
int t_index_w = wid + tx + ty*col_w;
int t_index_h2h = h2h_id + tx + ty*row_w;
//Attenzione alla divergenza dei threads (vedi CCC pag.137)
shared_h2h[ty][tx] = (t_index_h2h < num_pattern*row_w) ? (h2h[t_index_h2h]) : (0.0f);
shared_w[ty][tx] = (t_index_w < col_w*row_w) ? (w[t_index_w]) : (0.0f);
__syncthreads();
current_inc = row_w - (block_r_border - block_dim);
min = (current_inc < block_dim) ? (current_inc) : (block_dim);
#pragma unroll(2)
for (int k = 0; k < min; k++) {
partial += shared_h2h[ty][k] * shared_w[k][tx];
}
__syncthreads();
}
//Attenzione alla divergenza dei threads (vedi CCC pag.137)
if (dest_x < col_w && dest_y < num_pattern) {
DATA out = (DATA)1.0 / (DATA)(1.0 + exp(-(partial + biases[dest_x])));
h2h_dest[dest_y*col_w + dest_x] = out; //SIGMA
//Se siamo nell'ultimo passo
if (col_w == NEURO_OUTPUT) {
DATA target = tex2D(texreference_target, dest_x, dest_y + offset_y);
//Scrivi nella posizione corrispondente della matrice di ERRORE
/*0.5*(Target[p][k] - Output[p][k])*(Target[p][k] - Output[p][k])*/
error[dest_y*col_w + dest_x] = 0.5*(target - out)*(target - out);
//Scrivi nella posizione corrispondente della matrice DELTA
/*(Target[p][k] - Output[p][k]) * Output[p][k] * (1.0 - Output[p][k])*/
delta[dest_y*col_w + dest_x] = (target - out)*(out)*(1 - out);
}
}
}
/*patt_per_step č il numero di pattern (quando possibile...) da considerare in ciascuna iterazione su h2h*/
/*Questo kernel ad ogni passo incrementa il puntatore ad h2h di num_patt_per_step*NEURO_L_L_1 (e similmente h2h_dest),
controlla che sia ancora nel range di h2h, e calcola num_pattern (vedi sopra) in funzione dei
pattern mancanti.
stream_offset_y č la posizione lungo le y da cui parte (nelle matrici h2h e h2h_dest) lo stream corrente.
*/
//Dove ora c'č STREAMSIZE prima c'era TOTAL_PATT
__global__ void MMMulDevFeed(DATA *h2h, DATA *w, DATA *biases, DATA *h2h_dest, DATA *delta, DATA *error, unsigned int row_w, unsigned int col_w, unsigned int patt_per_step, unsigned int stream_offset_y) {
unsigned int current_patts;
unsigned int remaining_patts;
const int pos_block_y = blockIdx.y*blockDim.x; //Posizione del blocco corrente rispetto alla griglia lungo le y
//Assumiamo che i blocchi siano quadrati (blockDim.x = blockDim.y)
for (unsigned int y = 0; y < STREAMSIZE; y += patt_per_step) {
remaining_patts = STREAMSIZE - y;
current_patts = (remaining_patts < patt_per_step) ? (remaining_patts) : (patt_per_step);
if (pos_block_y >= current_patts) { return; }
MMMulDevPartialFeed(h2h + y*row_w, w, biases, h2h_dest + y*col_w, delta + y*NEURO_OUTPUT, error + y*NEURO_OUTPUT, row_w, col_w, current_patts, stream_offset_y + y);
}
}
/*-------------------------------end--MMMul--Feedforward------------------------*/
/*++++---------------------------MMMul--BackPropagation-------------------------++++*/
__device__ void MMMulDevPartialBack(DATA *delta_l, DATA *w, DATA *delta_l_1, DATA *h2h_l_1, unsigned int row_w, unsigned int col_w, unsigned int num_pattern) {
int tx = threadIdx.x, ty = threadIdx.y;
int block_x = blockIdx.x;
int block_y = blockIdx.y;
int block_dim = blockDim.x; // assumiamo che i blocchi siano quadrati
int dest_x = block_x*block_dim + tx;
int dest_y = block_y*block_dim + ty;
//Dobbiamo scorrere la matrice w per righe (stiamo considerando w come fosse trasposta -- vedi 16 febbraio su diario)
int w_y = block_x*block_dim*col_w; // start block in w
int delta_l_y = block_y*block_dim*col_w; // start block in delta_l_y
int end_delta_l = delta_l_y + col_w - 1; // last block position in h2h
int step_w_y = block_dim;
int step_delta_l = block_dim;
DATA partial = 0.0f;
int block_r_border = 0; // contatore che indica in che iterazione dei blocchi ci troviamo
int current_inc;
int min;
for (int wid = w_y, deltaid = delta_l_y; deltaid <= end_delta_l; wid += step_w_y, deltaid += step_delta_l) {
block_r_border += block_dim;
__shared__ DATA shared_w[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
__shared__ DATA shared_delta_l[BLOCK_SIDE_FIRST_LAYER][BLOCK_SIDE_FIRST_LAYER];
int t_index_w = wid + tx + ty*col_w;
int t_index_delta_l = deltaid + tx + ty*col_w;
//Attenzione alla divergenza dei threads (vedi CCC pag.137)
shared_delta_l[ty][tx] = (t_index_delta_l < num_pattern*col_w) ? (delta_l[t_index_delta_l]) : (0.0f);
//Salviamo la sottomatrice trasposta nella shared memory della matrice dei pesi (osservare che in tal modo evitiamo conflitti di banco):
shared_w[tx][ty] = (t_index_w < row_w*col_w) ? (w[t_index_w]) : (0.0f);
__syncthreads();
current_inc = col_w - (block_r_border - block_dim);
min = (current_inc < block_dim) ? (current_inc) : (block_dim);
#pragma unroll(2)
for (int k = 0; k < min; k++) {
partial += shared_delta_l[ty][k] * shared_w[k][tx];
}
__syncthreads();
}
//Attenzione alla divergenza dei threads (vedi CCC pag.137)
if (dest_x < row_w && dest_y < num_pattern) {
//Backpropagate the delta to the previous layer
DATA h2h_l_1_target = h2h_l_1[dest_y*row_w + dest_x];
delta_l_1[dest_y*row_w + dest_x] = partial*h2h_l_1_target*(1 - h2h_l_1_target);
}
}
__global__ void MMMulDevBack(DATA *delta_l, DATA *w, DATA *delta_l_1, DATA *h2h_l_1, unsigned int row_w, unsigned int col_w, unsigned int patt_per_step) {
unsigned int current_patts;
unsigned int remaining_patts;
const int pos_block_y = blockIdx.y*blockDim.x; //Posizione del blocco corrente rispetto alla griglia lungo le y
//Assumiamo che i blocchi siano quadrati (blockDim.x = blockDim.y)
for (unsigned int y = 0; y < STREAMSIZE; y += patt_per_step) {
remaining_patts = STREAMSIZE - y;
current_patts = (remaining_patts < patt_per_step) ? (remaining_patts) : (patt_per_step);
if (pos_block_y >= current_patts) { return; }
MMMulDevPartialBack(delta_l + y*col_w, w, delta_l_1 + y*row_w, h2h_l_1 + y*row_w, row_w, col_w, current_patts);
}
}
/*-------------------------------end--MMMul--BackPropagation------------------------*/
/*HOST*/
/*FEEDFORWARD AND BACKPROPAGATION PHASES -- THE INPUT IS TRANSMITTED VIA THE NETWORK AND IN BACK PROPAGATED*/
void FeedAndBack(DATA *INPUT, struct host_to_dev_mem * htdm, struct dev_struct *dev_htdm, DATA *dev_error_mat, DATA *dev_error, int *nupl, int layers, cudaStream_t *streams, BOOL first_epoch) {
//cudaEvent_t start, stop;
//Grid setting
dim3 grid, block;
unsigned int patt_per_step;
//Useful pointers
DATA *h2h, *w, *bias, *h2h_dest, *delta, *error;
//Delta da cui parte l'informazione (delta_l) e delta dove arriva tramite la backpropagation (delta_l_1)
DATA *delta_l, *delta_l_1;
//offset
int offset;
//startTimer(&start, &stop);
if (first_epoch) {
HANDLE_CUDA(cudaMemcpy(dev_htdm, htdm, (GLOBAL_BIAS_SIZE + GLOBAL_W_SIZE) * sizeof(DATA), cudaMemcpyHostToDevice));
}
//stopAndPrint(&start, &stop);
for (int i = 0; i < NSTREAMS; i++) {
//Leggere 15 febbraio del diario (passo 1 del feedforward, considerazioni)
block.x = gs.block[0];
block.y = gs.block[0];
grid.x = (nupl[1] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[0] / grid.x, 1); //Evitare che possa diventare 0
patt_per_step = grid.y * block.y;
offset = i*STREAMSIZE;
//Set pointers
h2h = dev_htdm->H2H + offset*nupl[0];
w = dev_htdm->WeightH2H;
bias = dev_htdm->BiasH2H;
h2h_dest = dev_htdm->H2H + htdm->matrix_H2H_index[0][1] + offset*nupl[1];
delta = dev_htdm->Delta + htdm->matrix_DELTA_index[0][layers - 2] + offset*nupl[layers - 1];
error = dev_error_mat + offset*nupl[layers - 1];
//Pointers set up
if (first_epoch) {
HANDLE_CUDA(cudaMemcpyAsync(h2h, INPUT + offset*nupl[0], nupl[0] * STREAMSIZE * sizeof(DATA), cudaMemcpyHostToDevice, streams[i]));
}
//First Feedforward step:
MMMulDevFeed << <grid, block, 0, streams[i] >> > (h2h, w, bias, h2h_dest, delta, error, nupl[0], nupl[1], patt_per_step, offset);
for (int l = 1; l < (layers - 2); l++) {
block.x = gs.block[l];
block.y = gs.block[l];
grid.x = (nupl[l + 1] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[l] / grid.x, 1); //Evitare che possa diventare 0
patt_per_step = grid.y * block.y;
//Set pointers
h2h = dev_htdm->H2H + htdm->matrix_H2H_index[0][l] + offset*nupl[l];
w = dev_htdm->WeightH2H + htdm->matrix_WB_index[0][l];
bias = dev_htdm->BiasH2H + htdm->matrix_WB_index[1][l];
h2h_dest = dev_htdm->H2H + htdm->matrix_H2H_index[0][l + 1] + offset*nupl[l + 1];
//Delta and error already set up
//Pointers set up
MMMulDevFeed << <grid, block, 0, streams[i] >> > (h2h, w, bias, h2h_dest, delta, error, nupl[l], nupl[l + 1], patt_per_step, offset);
}
//Last Feedforward step:
block.x = gs.block[layers - 2];
block.y = gs.block[layers - 2];
grid.x = (nupl[layers - 1] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[layers - 2] / grid.x, 1); //Evitare che possa diventare 0
patt_per_step = grid.y * block.y;
//Set pointers
h2h = dev_htdm->H2H + htdm->matrix_H2H_index[0][layers - 2] + offset*nupl[layers - 2];
w = dev_htdm->WeightH2H + htdm->matrix_WB_index[0][layers - 2];
bias = dev_htdm->BiasH2H + htdm->matrix_WB_index[1][layers - 2];
h2h_dest = dev_htdm->H2H + htdm->matrix_H2H_index[0][layers - 1] + offset*nupl[layers - 1];
//Delta and error already set up
//Pointers set up
MMMulDevFeed << <grid, block, 0, streams[i] >> > (h2h, w, bias, h2h_dest, delta, error, nupl[layers - 2], nupl[layers - 1], patt_per_step, offset);
//BackPropagate for all streams:
for (int delta_index = (layers - 2); delta_index > 0; delta_index--) {
block.x = gs.block[delta_index];
block.y = gs.block[delta_index];
grid.x = (nupl[delta_index] + block.x - 1) / block.x;
grid.y = MAX(gs.grid[delta_index] / grid.x, 1); //Evitare che possa diventare 0
patt_per_step = grid.y * block.y;
//Set pointers
h2h = dev_htdm->H2H + htdm->matrix_H2H_index[0][delta_index] + offset*nupl[delta_index];
w = dev_htdm->WeightH2H + htdm->matrix_WB_index[0][delta_index];
delta_l = dev_htdm->Delta + htdm->matrix_DELTA_index[0][delta_index] + offset*nupl[delta_index + 1];
delta_l_1 = dev_htdm->Delta + htdm->matrix_DELTA_index[0][delta_index - 1] + offset*nupl[delta_index];
//Pointers set up
MMMulDevBack << <grid, block, 0, streams[i] >> > (delta_l, w, delta_l_1, h2h, nupl[delta_index], nupl[delta_index + 1], patt_per_step);
}
}
//**HERE**
//Error reduction (default stream)
deviceReduceBlockAtomicKernel << <OPTIMUM_BLOCK_NUM * 2, BLOCK_SIDE*BLOCK_SIDE >> > (dev_error_mat, dev_error, TOTAL_PATT*nupl[layers-1]);
}
/*UTILITY FUNCTIONS*/
void HOST_feedforward(DATA *INPUT, DATA **W, DATA **BIAS, DATA **H2H, int *nupl) {
MMMulHost(INPUT, W[0], BIAS[0], H2H[0], TOTAL_PATT, nupl[0], nupl[1]);
MMMulHost(H2H[0], W[1], BIAS[1], H2H[1], TOTAL_PATT, nupl[1], nupl[2]);
MMMulHost(H2H[1], W[2], BIAS[2], H2H[2], TOTAL_PATT, nupl[2], nupl[3]);
}
/*Print a matrix*/
void printMat(DATA *mat, int rows, int cols) {
for (int i = 0; i < rows; i++) {
printf("ROW %d : {", i);
for (int j = 0; j < cols; j++) {
printf("%f - ", mat[i*cols + j]);
}
printf("}");
printf("\n\n");
}
printf("\n\n");
}
/*Print error matrix on host (for checking correctness of device)*/
void printErrorMat(DATA *TARGET, DATA *OUTPUT_MAT, int rows, int cols) {
for (int i = 0; i < rows; i++) {
printf("ROW %d : {", i);
for (int j = 0; j < cols; j++) {
printf("%f - ", 0.5*(TARGET[i*cols + j] - OUTPUT_MAT[i*cols + j])*(TARGET[i*cols + j] - OUTPUT_MAT[i*cols + j]));
}
printf("}");
printf("\n\n");
}
printf("\n\n");
}
/*On host multiplication*/
void MMMulHost(DATA *H2H, DATA *W, DATA *BIAS, DATA *H2H_RES, int row_H2H, int col_H2H, int col_W) {
for (int i = 0; i < row_H2H; i++) {
for (int j = 0; j < col_W; j++) {
DATA prod = 0.0;
for (int k = 0; k < col_H2H; k++) {
prod += H2H[i*col_H2H + k] * W[k*col_W + j];
}
H2H_RES[i*col_W + j] = (DATA)1.0 / (DATA)(1.0 + exp(-(prod + BIAS[j]))); // bias added
}
}
}
/*Check device*/
BOOL matsAreEquals(DATA *A, DATA *B, int rows, int cols) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) { // the first column is for adapting the data
float err = fabs(A[i*cols + j] - B[i*cols + j]);
//printf("Error in i=%d,j=%d: %f\n", i, j, err);
if (err >= MAX_ERR) { printf("row: %d, col: %d\n", i, j); return 0; }
}
}
return 1;
}
/*Check device reduction*/
DATA errorReductionHost(DATA *error_mat, int rows, int cols) {
DATA reduction = 0.0f;
for (int i = 0; i < rows*cols; i++) {
reduction += error_mat[i];
}
return reduction;
}
/*ALLOCATION FUNCTIONS*/
/*init struct on host*/
/*Initialize the host<->device bookkeeping structure: builds the start-offset
 * tables for the flattened H2H, DELTA, Weight and Bias arrays (one running-sum
 * offset per layer) and fills every weight/bias with a uniform random value
 * in [0,1] via rand()/RAND_MAX.*/
void HOST_init_struct(struct host_to_dev_mem* htdm, int* nupl, int layers) {
int prev_sum[MATRIX_NUMBER_STRUCT];
/* layer 0 sub-matrices all start at offset 0 of their flat arrays */
htdm->matrix_H2H_index[0][0] = 0;
htdm->matrix_DELTA_index[0][0] = 0;
htdm->matrix_WB_index[0][0] = 0;
htdm->matrix_WB_index[1][0] = 0;
//TODO: error checks on the mallocs should be added
/*the offset of the matrix at the current layer depends on those of the previous layers*/
for (int layer = 1; layer<(layers - 1); layer++) {
prev_sum[0] = htdm->matrix_H2H_index[0][layer - 1];
prev_sum[1] = htdm->matrix_DELTA_index[0][layer - 1];
prev_sum[2] = htdm->matrix_WB_index[0][layer - 1];
prev_sum[3] = htdm->matrix_WB_index[1][layer - 1];
/* running sums: each offset = previous offset + previous sub-matrix size */
htdm->matrix_H2H_index[0][layer] = nupl[layer - 1] * TOTAL_PATT + prev_sum[0];
htdm->matrix_DELTA_index[0][layer] = nupl[layer] * TOTAL_PATT + prev_sum[1];
htdm->matrix_WB_index[0][layer] = nupl[layer - 1] * nupl[layer] + prev_sum[2];
htdm->matrix_WB_index[1][layer] = nupl[layer] + prev_sum[3];
/* random init of the weights/biases connecting layer -> layer+1 */
for (int i = 0; i < nupl[layer]; i++) {
for (int j = 0; j < nupl[layer + 1]; j++) {
htdm->WeightH2H[htdm->matrix_WB_index[0][layer] + i*nupl[layer + 1] + j] = (DATA)rand() / (DATA)RAND_MAX;
htdm->BiasH2H[htdm->matrix_WB_index[1][layer] + j] = (DATA)rand() / (DATA)RAND_MAX;
}
}
}
/* offset of the last H2H matrix (activations of the final layer) */
prev_sum[0] = htdm->matrix_H2H_index[0][layers - 2];
htdm->matrix_H2H_index[0][layers - 1] = nupl[layers - 2] * TOTAL_PATT + prev_sum[0];
/* weights/biases of the first layer live at offset 0 and are initialized last */
for (int i = 0; i < nupl[0]; i++) {
for (int j = 0; j < nupl[1]; j++) {
htdm->WeightH2H[i*nupl[1] + j] = (DATA)rand() / (DATA)RAND_MAX;
htdm->BiasH2H[j] = (DATA)rand() / (DATA)RAND_MAX;
}
}
}
//NON CANCELLARE !!! INSERIRE NEL FEEDFORWARD PER FARE TEST DI CORRETTEZZA NEL PUNTO **HERE**!!!
//RICORDARSI DI DECOMMENTARE LA 'r' NEL MAIN
/*
cudaDeviceSynchronize();
DATA **H2H_RES = (DATA**)malloc(TOTAL_LAYER * sizeof(DATA*));
for (int i = 0; i < TOTAL_LAYER; i++) {
H2H_RES[i] = (DATA*)malloc(TOTAL_PATT*nupl[i] * sizeof(DATA));
}
for (int l = 0; l < (layers - 1); l++) {
HANDLE_CUDA(cudaMemcpy(htdm->H2H+ htdm->matrix_H2H_index[0][l+1],dev_htdm->H2H + htdm->matrix_H2H_index[0][l+1], (TOTAL_PATT)* nupl[l+1] * sizeof(DATA), cudaMemcpyDeviceToHost));
MMMulHost( htdm->H2H + htdm->matrix_H2H_index[0][l], htdm->WeightH2H + htdm->matrix_WB_index[0][l] , htdm->BiasH2H + htdm->matrix_WB_index[1][l], H2H_RES[l + 1], TOTAL_PATT, nupl[l], nupl[l + 1]);
BOOL b = matsAreEquals(htdm->H2H+ htdm->matrix_H2H_index[0][l+1], H2H_RES[l + 1], TOTAL_PATT, nupl[l + 1]);
printf("layer%d %d\n",l, b);
}*/ |
8db7e18688f44ac34d7ab864cadcf4b4aa8cf784.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
#include "unary_op_hip.cuh"
namespace raft {
namespace linalg {
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Test launch helper: with a null `in` it exercises the write-only variant
// (out[idx] = scalar*idx), otherwise the plain elementwise out[i] = in[i]*scalar.
template <typename InType, typename IdxType = int, typename OutType = InType>
void unaryOpLaunch(OutType *out, const InType *in, InType scalar, IdxType len,
hipStream_t stream) {
if (in == nullptr) {
// write-only path: the device lambda receives (output pointer, index)
auto op = [scalar] __device__(OutType * ptr, IdxType idx) {
*ptr = static_cast<OutType>(scalar * idx);
};
writeOnlyUnaryOp<OutType, decltype(op), IdxType>(out, len, op, stream);
} else {
auto op = [scalar] __device__(InType in) {
return static_cast<OutType>(in * scalar);
};
unaryOp<InType, decltype(op), IdxType, OutType>(out, in, len, op, stream);
}
}
// Typed/parameterized fixture: fills `in` with uniform random values between
// -1 and 1, then checks the unaryOp implementation against naiveScale.
template <typename InType, typename IdxType, typename OutType = InType>
class UnaryOpTest
: public ::testing::TestWithParam<UnaryOpInputs<InType, IdxType, OutType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<
UnaryOpInputs<InType, IdxType, OutType>>::GetParam();
raft::random::Rng r(params.seed);
CUDA_CHECK(hipStreamCreate(&stream));
auto len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
// random input between -1 and 1, generated on `stream`
r.uniform(in, len, InType(-1.0), InType(1.0), stream);
}
void TearDown() override {
// drain the stream before destroying it and freeing device buffers
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
virtual void DoTest() {
auto len = params.len;
auto scalar = params.scalar;
naiveScale(out_ref, in, scalar, len, stream); // reference result
unaryOpLaunch(out, in, scalar, len, stream);  // implementation under test
CUDA_CHECK(hipStreamSynchronize(stream));
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<OutType>(params.tolerance)));
}
UnaryOpInputs<InType, IdxType, OutType> params;
InType *in;            // device input buffer
OutType *out_ref, *out; // device reference / test output buffers
hipStream_t stream;
};
// Variant exercising the write-only path: `in` is passed as nullptr so
// unaryOpLaunch takes the writeOnlyUnaryOp branch (out[idx] = scalar*idx).
template <typename OutType, typename IdxType>
class WriteOnlyUnaryOpTest : public UnaryOpTest<OutType, IdxType, OutType> {
protected:
void DoTest() override {
auto len = this->params.len;
auto scalar = this->params.scalar;
naiveScale(this->out_ref, (OutType *)nullptr, scalar, len, this->stream);
unaryOpLaunch(this->out, (OutType *)nullptr, scalar, len, this->stream);
CUDA_CHECK(hipStreamSynchronize(this->stream));
ASSERT_TRUE(devArrMatch(this->out_ref, this->out, this->params.len,
CompareApprox<OutType>(this->params.tolerance)));
}
};
#define UNARY_OP_TEST(Name, inputs) \
TEST_P(Name, Result) { DoTest(); } \
INSTANTIATE_TEST_SUITE_P(UnaryOpTests, Name, ::testing::ValuesIn(inputs))
const std::vector<UnaryOpInputs<float, int>> inputsf_i32 = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef UnaryOpTest<float, int> UnaryOpTestF_i32;
UNARY_OP_TEST(UnaryOpTestF_i32, inputsf_i32);
typedef WriteOnlyUnaryOpTest<float, int> WriteOnlyUnaryOpTestF_i32;
UNARY_OP_TEST(WriteOnlyUnaryOpTestF_i32, inputsf_i32);
const std::vector<UnaryOpInputs<float, size_t>> inputsf_i64 = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef UnaryOpTest<float, size_t> UnaryOpTestF_i64;
UNARY_OP_TEST(UnaryOpTestF_i64, inputsf_i64);
typedef WriteOnlyUnaryOpTest<float, size_t> WriteOnlyUnaryOpTestF_i64;
UNARY_OP_TEST(WriteOnlyUnaryOpTestF_i64, inputsf_i64);
const std::vector<UnaryOpInputs<float, int, double>> inputsf_i32_d = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef UnaryOpTest<float, int, double> UnaryOpTestF_i32_D;
UNARY_OP_TEST(UnaryOpTestF_i32_D, inputsf_i32_d);
const std::vector<UnaryOpInputs<double, int>> inputsd_i32 = {
{0.00000001, 1024 * 1024, 2.0, 1234ULL}};
typedef UnaryOpTest<double, int> UnaryOpTestD_i32;
UNARY_OP_TEST(UnaryOpTestD_i32, inputsd_i32);
typedef WriteOnlyUnaryOpTest<double, int> WriteOnlyUnaryOpTestD_i32;
UNARY_OP_TEST(WriteOnlyUnaryOpTestD_i32, inputsd_i32);
const std::vector<UnaryOpInputs<double, size_t>> inputsd_i64 = {
{0.00000001, 1024 * 1024, 2.0, 1234ULL}};
typedef UnaryOpTest<double, size_t> UnaryOpTestD_i64;
UNARY_OP_TEST(UnaryOpTestD_i64, inputsd_i64);
typedef WriteOnlyUnaryOpTest<double, size_t> WriteOnlyUnaryOpTestD_i64;
UNARY_OP_TEST(WriteOnlyUnaryOpTestD_i64, inputsd_i64);
} // end namespace linalg
} // end namespace raft
| 8db7e18688f44ac34d7ab864cadcf4b4aa8cf784.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
#include "unary_op.cuh"
namespace raft {
namespace linalg {
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Test launch helper: with a null `in` it exercises the write-only variant
// (out[idx] = scalar*idx), otherwise the plain elementwise out[i] = in[i]*scalar.
template <typename InType, typename IdxType = int, typename OutType = InType>
void unaryOpLaunch(OutType *out, const InType *in, InType scalar, IdxType len,
cudaStream_t stream) {
if (in == nullptr) {
// write-only path: the device lambda receives (output pointer, index)
auto op = [scalar] __device__(OutType * ptr, IdxType idx) {
*ptr = static_cast<OutType>(scalar * idx);
};
writeOnlyUnaryOp<OutType, decltype(op), IdxType>(out, len, op, stream);
} else {
auto op = [scalar] __device__(InType in) {
return static_cast<OutType>(in * scalar);
};
unaryOp<InType, decltype(op), IdxType, OutType>(out, in, len, op, stream);
}
}
// Typed/parameterized fixture: fills `in` with uniform random values between
// -1 and 1, then checks the unaryOp implementation against naiveScale.
template <typename InType, typename IdxType, typename OutType = InType>
class UnaryOpTest
: public ::testing::TestWithParam<UnaryOpInputs<InType, IdxType, OutType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<
UnaryOpInputs<InType, IdxType, OutType>>::GetParam();
raft::random::Rng r(params.seed);
CUDA_CHECK(cudaStreamCreate(&stream));
auto len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
// random input between -1 and 1, generated on `stream`
r.uniform(in, len, InType(-1.0), InType(1.0), stream);
}
void TearDown() override {
// drain the stream before destroying it and freeing device buffers
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
virtual void DoTest() {
auto len = params.len;
auto scalar = params.scalar;
naiveScale(out_ref, in, scalar, len, stream); // reference result
unaryOpLaunch(out, in, scalar, len, stream);  // implementation under test
CUDA_CHECK(cudaStreamSynchronize(stream));
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<OutType>(params.tolerance)));
}
UnaryOpInputs<InType, IdxType, OutType> params;
InType *in;            // device input buffer
OutType *out_ref, *out; // device reference / test output buffers
cudaStream_t stream;
};
// Variant exercising the write-only path: `in` is passed as nullptr so
// unaryOpLaunch takes the writeOnlyUnaryOp branch (out[idx] = scalar*idx).
template <typename OutType, typename IdxType>
class WriteOnlyUnaryOpTest : public UnaryOpTest<OutType, IdxType, OutType> {
protected:
void DoTest() override {
auto len = this->params.len;
auto scalar = this->params.scalar;
naiveScale(this->out_ref, (OutType *)nullptr, scalar, len, this->stream);
unaryOpLaunch(this->out, (OutType *)nullptr, scalar, len, this->stream);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
ASSERT_TRUE(devArrMatch(this->out_ref, this->out, this->params.len,
CompareApprox<OutType>(this->params.tolerance)));
}
};
#define UNARY_OP_TEST(Name, inputs) \
TEST_P(Name, Result) { DoTest(); } \
INSTANTIATE_TEST_SUITE_P(UnaryOpTests, Name, ::testing::ValuesIn(inputs))
const std::vector<UnaryOpInputs<float, int>> inputsf_i32 = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef UnaryOpTest<float, int> UnaryOpTestF_i32;
UNARY_OP_TEST(UnaryOpTestF_i32, inputsf_i32);
typedef WriteOnlyUnaryOpTest<float, int> WriteOnlyUnaryOpTestF_i32;
UNARY_OP_TEST(WriteOnlyUnaryOpTestF_i32, inputsf_i32);
const std::vector<UnaryOpInputs<float, size_t>> inputsf_i64 = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef UnaryOpTest<float, size_t> UnaryOpTestF_i64;
UNARY_OP_TEST(UnaryOpTestF_i64, inputsf_i64);
typedef WriteOnlyUnaryOpTest<float, size_t> WriteOnlyUnaryOpTestF_i64;
UNARY_OP_TEST(WriteOnlyUnaryOpTestF_i64, inputsf_i64);
const std::vector<UnaryOpInputs<float, int, double>> inputsf_i32_d = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef UnaryOpTest<float, int, double> UnaryOpTestF_i32_D;
UNARY_OP_TEST(UnaryOpTestF_i32_D, inputsf_i32_d);
const std::vector<UnaryOpInputs<double, int>> inputsd_i32 = {
{0.00000001, 1024 * 1024, 2.0, 1234ULL}};
typedef UnaryOpTest<double, int> UnaryOpTestD_i32;
UNARY_OP_TEST(UnaryOpTestD_i32, inputsd_i32);
typedef WriteOnlyUnaryOpTest<double, int> WriteOnlyUnaryOpTestD_i32;
UNARY_OP_TEST(WriteOnlyUnaryOpTestD_i32, inputsd_i32);
const std::vector<UnaryOpInputs<double, size_t>> inputsd_i64 = {
{0.00000001, 1024 * 1024, 2.0, 1234ULL}};
typedef UnaryOpTest<double, size_t> UnaryOpTestD_i64;
UNARY_OP_TEST(UnaryOpTestD_i64, inputsd_i64);
typedef WriteOnlyUnaryOpTest<double, size_t> WriteOnlyUnaryOpTestD_i64;
UNARY_OP_TEST(WriteOnlyUnaryOpTestD_i64, inputsd_i64);
} // end namespace linalg
} // end namespace raft
|
6a01355e400860ea0b7dd63f7314758df9ba0389.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "F3.cuh"
#include "IO.h"
#include "constants.cuh"
#include <iostream>
#include <vector>
#include <iterator>
#include <fstream>
/* Shifted Griewank benchmark (F3): sets the search bounds [-600, 600], the
 * benchmark ID and the launch geometry (one thread per population member,
 * 32 threads per block), then loads the shift vector from disk and uploads
 * it to the constant-memory symbol `shift`. */
F3::F3(uint _dim, uint _ps):Benchmarks()
{
n_dim = _dim;
ps = _ps;
min = -600.0;
max = +600.0;
ID = 3;
n_threads = 32;
/* ceil(ps / n_threads) blocks so every individual gets a thread */
n_blocks = (ps%n_threads)? (ps/n_threads)+1 : (ps/n_threads);
/* ---------------------------------------------- */
/* Load a shift vector to test the bench function */
std::string file_name = "data-files/shift_griewank.mat";
std::string vec_name = "Shift - Griewank [-600.0, +600.0]";
IO * io = new IO(); // NOTE(review): never deleted in this scope -- leaks; confirm ownership
std::ifstream file(file_name);
if( not file.is_open() ){
std::cout << "\"data-files/shift_griewank.mat\" could not be opened\n";
exit(-1);
}
auto loaded_vec = io->load_vector<float>( vec_name, file ) ;
file.close();
/* ---------------------------------------------- */
checkCudaErrors(hipMemcpyToSymbol(shift, (void *) loaded_vec.data(), n_dim * sizeof(float)));
}
/* Intentionally empty: no cleanup is performed here. */
F3::~F3()
{
/*empty*/
}
/* Shifted Griewank: f(x) = sum(z_i^2)/4000 - prod(cos(z_i/sqrt(i+1))) + 1
 * with z = x - shift (shift lives in constant memory).
 * One thread per population member: thread id_p reads row id_p of x
 * (row-major, params.n_dim columns) and writes its fitness to f[id_p].
 * Fix: arithmetic now uses float literals (0.0f, 1.0f, 4000.0f) so the
 * kernel stays in single precision instead of being promoted to double. */
__global__ void computeK3(float * x, float * f){
uint id_p = threadIdx.x + (blockIdx.x * blockDim.x);
uint ps = params.ps;
if( id_p < ps ){ // guard the padded tail of the last block
uint ndim = params.n_dim;
uint id_d = id_p * ndim;
float s1 = 0.0f, s2 = 1.0f, z;
for(uint i = 0; i < ndim; i++){
z = x[id_d + i] - shift[i];
s1 += (z * z);
s2 *= cosf(z/sqrtf(i+1));
}
s1 /= 4000.0f;
s1 = (s1 - s2 + 1.0f);
/* clamp tiny results to exactly zero (note: 10e-08 is 1e-07) */
if( s1 <= 10e-08f )
s1 = 0.0f;
f[id_p] = s1;
}
}
/* Evaluate the fitness of the whole population (one thread per individual).
 * Launch geometry (n_blocks, n_threads) was fixed in the constructor. */
void F3::compute(float * x, float * f){
hipLaunchKernelGGL(( computeK3), dim3(n_blocks), dim3(n_threads) , 0, 0, x, f);
checkCudaErrors(hipGetLastError());
}
| 6a01355e400860ea0b7dd63f7314758df9ba0389.cu | #include "F3.cuh"
#include "IO.h"
#include "constants.cuh"
#include <iostream>
#include <vector>
#include <iterator>
#include <fstream>
/* Shifted Griewank benchmark (F3): sets the search bounds [-600, 600], the
 * benchmark ID and the launch geometry (one thread per population member,
 * 32 threads per block), then loads the shift vector from disk and uploads
 * it to the constant-memory symbol `shift`. */
F3::F3(uint _dim, uint _ps):Benchmarks()
{
n_dim = _dim;
ps = _ps;
min = -600.0;
max = +600.0;
ID = 3;
n_threads = 32;
/* ceil(ps / n_threads) blocks so every individual gets a thread */
n_blocks = (ps%n_threads)? (ps/n_threads)+1 : (ps/n_threads);
/* ---------------------------------------------- */
/* Load a shift vector to test the bench function */
std::string file_name = "data-files/shift_griewank.mat";
std::string vec_name = "Shift - Griewank [-600.0, +600.0]";
IO * io = new IO(); // NOTE(review): never deleted in this scope -- leaks; confirm ownership
std::ifstream file(file_name);
if( not file.is_open() ){
std::cout << "\"data-files/shift_griewank.mat\" could not be opened\n";
exit(-1);
}
auto loaded_vec = io->load_vector<float>( vec_name, file ) ;
file.close();
/* ---------------------------------------------- */
checkCudaErrors(cudaMemcpyToSymbol(shift, (void *) loaded_vec.data(), n_dim * sizeof(float)));
}
/* Intentionally empty: no cleanup is performed here. */
F3::~F3()
{
/*empty*/
}
/* Shifted Griewank: f(x) = sum(z_i^2)/4000 - prod(cos(z_i/sqrt(i+1))) + 1
 * with z = x - shift (shift lives in constant memory).
 * One thread per population member: thread id_p reads row id_p of x
 * (row-major, params.n_dim columns) and writes its fitness to f[id_p].
 * Fix: arithmetic now uses float literals (0.0f, 1.0f, 4000.0f) so the
 * kernel stays in single precision instead of being promoted to double. */
__global__ void computeK3(float * x, float * f){
uint id_p = threadIdx.x + (blockIdx.x * blockDim.x);
uint ps = params.ps;
if( id_p < ps ){ // guard the padded tail of the last block
uint ndim = params.n_dim;
uint id_d = id_p * ndim;
float s1 = 0.0f, s2 = 1.0f, z;
for(uint i = 0; i < ndim; i++){
z = x[id_d + i] - shift[i];
s1 += (z * z);
s2 *= cosf(z/sqrtf(i+1));
}
s1 /= 4000.0f;
s1 = (s1 - s2 + 1.0f);
/* clamp tiny results to exactly zero (note: 10e-08 is 1e-07) */
if( s1 <= 10e-08f )
s1 = 0.0f;
f[id_p] = s1;
}
}
/* Evaluate the fitness of the whole population (one thread per individual).
 * Launch geometry (n_blocks, n_threads) was fixed in the constructor. */
void F3::compute(float * x, float * f){
computeK3<<< n_blocks, n_threads >>>(x, f);
checkCudaErrors(cudaGetLastError());
}
|
51527b4080a069f10b10bc0397df12856caaf418.hip | // !!! This is a file automatically generated by hipify!!!
#include "std_incl.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include <ctime>
#include "omp.h"
#include <stdio.h>
#include <fstream>
#include <sys/stat.h>
#include <vector>
#include <boost/tokenizer.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_01.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/chrono.hpp>
#include <memory>
#include "../cputrack/ResultManager.h"
#include "../cputrack/QueuedTracker.h"
#include "../cputrack/QueuedCPUTracker.h"
#include "../cudatrack/QueuedCUDATracker.h"
#include "../cputrack-test/SharedTests.h"
/*
This file tests the Result Manager. It is pretty straightforward:
- Build the Result Manager as LabView would
- Overload it with Data
- Test whether it loses frames during overload
*/
/* Stress test for the ResultManager: builds it the way the LabVIEW host
 * would, schedules N localization jobs (4 beads per frame) on the chosen
 * tracker, waits until everything is saved and reports lost frames.
 * Usage: <exe> N cpu|gpu   (N must be a multiple of 4). */
int main(int argc, char* argv[])
{
//At the end, we report the time taken.
auto timeStart = boost::chrono::high_resolution_clock::now();
//Just a reminder.
fprintf(stderr, "Note: Initialising ndlab/test/ResultManager (%d arguments).\n", argc);
//Otherwise, it will throw errors.
if (argc != 3)
{
fprintf(stderr, "You have to give in N, the number of images (multiple of 4) and \n\t gpu or cpu.\n");
return 0;
}
//Number of images, which are inputted as if there are 4 beads (which really doesn't matter).
const int N = (int)atoi(argv[1]); //4 beads, 24 images; 6 frames per image?
//Always report back.
fprintf(stderr, "Testing ResultManager with %d images. %s \n", N, argv[1]);
//Where to safe data/frameinfo.
const char* file = "./ResultManagerData.txt";
const char * frameinfo = "./ResultManagerFrameInfo.txt";
// Better to use modern smart pointers. Config settings are largely taken from older tests.
std::shared_ptr<ResultManagerConfig> cfg = std::make_shared<ResultManagerConfig>();
cfg->numBeads = 4;
cfg->numFrameInfoColumns = 0;
cfg->scaling = vector3f(1.0f, 1.0f, 1.0f);
cfg->offset = vector3f(0.0f, 0.0f, 0.0f);
cfg->writeInterval = 2500;
cfg->maxFramesInMemory = 0;// 100000;
cfg->binaryOutput = false;
std::vector< std::string > colNames;
std::string testName("Hey now");
colNames.push_back(testName);
fprintf(stderr, "Allocating Result Manager now. \n");
// The manager is an instance of the Result Manager class. Surprise.
std::shared_ptr<ResultManager> manager = std::make_shared<ResultManager>(file, frameinfo, cfg.get(), colNames);
//The QueuedCPUTracker instance is required to retrieve the results. It needs settings.
QTrkComputedConfig settings;
settings.qi_iterations = 5;
settings.zlut_minradius = 1;
settings.qi_minradius = 1;
settings.width = settings.height = 100;
settings.Update();
//Experimental file to use for test.
std::string fileName = "./exp.jpg";
//Make sure the file exists; otherwise, it will throw a really weird error.
bool fileExists = boost::filesystem::exists(fileName);
if (!fileExists)
{
fprintf(stderr, "File %s not found; is it in the directory of the executable?\n\n", fileName.c_str());
return 0;
}
//Let's load some image data.
auto data = ReadJPEGFile(fileName.c_str());
std::shared_ptr<QueuedTracker> qtrk;
//Make sure we are using the right tracker (as requested)
if (argc == 3)
{
std::string argTracker = std::string(argv[2]);
if (argTracker == "gpu")
{
fprintf(stderr, "Using CUDA tracker (GPU).\n");
std::shared_ptr<QueuedCUDATracker> cudaTracker = std::make_shared<QueuedCUDATracker>(settings);
cudaTracker->EnableTextureCache(true);
qtrk = cudaTracker;
}
else if(argTracker == "cpu")
{
fprintf(stderr, "Using CPU tracker (CPU).\n");
qtrk = std::make_shared<QueuedCPUTracker>(settings);
}
else
{
fprintf(stderr, "No tracker specified. Choose either cpu or gpu.\n");
return 0;
}
}
else
{
fprintf(stderr, "Faulty arguments. Your mother was a hamster, %d th of her name.", argc);
return 0;
}
//localization Mode QI tracker
auto modeQI = (LocMode_t)(LT_QI | LT_NormalizeProfile | LT_LocalizeZ);
qtrk->SetLocalizationMode(modeQI);
//Make the calculations
std::vector<LocalizationJob> jobs;
int frame = 0;
for (int i = 0; i < N; i++)
{
if (i % 4 == 0 && i != 0)
{
frame++;
}
//Make a localization job (batch of calculations)
LocalizationJob job(frame, 0, 0, 0);
job.zlutIndex = i % 4; //actually, frame number
jobs.push_back(job);
qtrk->ScheduleImageData(&data, &job);
}
manager->SetTracker(qtrk.get());
//Process images (using Flush because Start is CPU only)
qtrk->Flush();
//Report progress
int i = 0;
while (manager->GetFrameCounters().localizationsDone < N)
{
if (i > 100000)
{
auto counters = manager->GetFrameCounters();
fprintf(stderr, "Update: %d Localisations performed.\n", counters.localizationsDone);
i = 0;
}
i++;
}
//Assign frame
float somefloat = 0.0;
for (int i = 0; i < N / 4; i++)
{
manager->StoreFrameInfo(i, i, &somefloat);
}
//Pointer that will be filled with results
std::vector<LocalizationResult> results;
vector3f startPosition(0.0f, 0.0f, 0.0f);
vector2f initialGuess(45.0f, 50.0f);
//Allocate the 'array' that will hold the results.
for (int i = 0; i < N; i++)
{
LocalizationResult currentResult;
currentResult.job = jobs.at(i);
currentResult.pos = startPosition;
currentResult.firstGuess = initialGuess;
currentResult.error = 0;
currentResult.imageMean = 0.0f;
results.push_back(currentResult);
}
//Fill results array
manager->Flush();
//Wait untill all localizations have been performed.
i = 0;
while (manager->GetFrameCounters().lastSaveFrame < N / 4)
{
if (i > 100000)
{
auto counters = manager->GetFrameCounters();
//BUGFIX: 'i' is an int, so print it with %d (was %.3f: undefined behavior)
fprintf(stderr, "Update[%d]: %d frames saved.\n", i, counters.lastSaveFrame);
i = 0; //BUGFIX: reset the poll counter so the message repeats periodically
}
i++; //BUGFIX: 'i' was never incremented, so progress was never reported
}
//Get the results
auto counters = manager->GetFrameCounters();
auto getResults = manager->GetResults(results.data(), 0, N / cfg->numBeads);
//Report results
fprintf(stderr, "ResultManager results (%d) :\n", getResults);
fprintf(stderr, "\t frame\t bead\t x\t y\t z\n");
//Show at most the last 25 results.
//BUGFIX: results.size() is unsigned, so 'size() - 25' underflowed for N < 25
//and the loop printed nothing; compare before subtracting instead.
size_t firstShown = results.size() > 25 ? results.size() - 25 : 0;
for (size_t i = firstShown; i < results.size(); i++)
{
auto result = results[i];
fprintf(stderr, "\t%d\t%d\t%.3f\t%.3f\t%.3f\n", result.job.frame, (int)(i % 4), result.pos.x, result.pos.y, result.pos.z);
}
//Report final information
printf("Frame counters:\n\t Started at %d, processed %d, finished on %d\n", counters.startFrame, counters.processedFrames, counters.lastSaveFrame);
printf("\tCaptured %d, localizations %d, lostFrames %d, file error %d.\n", counters.capturedFrames, counters.localizationsDone, counters.lostFrames, counters.fileError);
//report time, end program
auto timeEnd = boost::chrono::high_resolution_clock::now();
auto microSeconds = boost::chrono::duration_cast<boost::chrono::microseconds>(timeEnd - timeStart).count();
fprintf(stderr, "Note: Elapsed time %ld microseconds. \n", (long)microSeconds); //cast: count() may be long long
return 0;
}
| 51527b4080a069f10b10bc0397df12856caaf418.cu | #include "std_incl.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include <ctime>
#include "omp.h"
#include <stdio.h>
#include <fstream>
#include <sys/stat.h>
#include <vector>
#include <boost/tokenizer.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_01.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/chrono.hpp>
#include <memory>
#include "../cputrack/ResultManager.h"
#include "../cputrack/QueuedTracker.h"
#include "../cputrack/QueuedCPUTracker.h"
#include "../cudatrack/QueuedCUDATracker.h"
#include "../cputrack-test/SharedTests.h"
/*
This file tests the Result Manager. It is pretty straightforward:
- Build the Result Manager as LabView would
- Overload it with Data
- Test whether it loses frames during overload
*/
/* Stress test for the ResultManager: builds it the way the LabVIEW host
 * would, schedules N localization jobs (4 beads per frame) on the chosen
 * tracker, waits until everything is saved and reports lost frames.
 * Usage: <exe> N cpu|gpu   (N must be a multiple of 4). */
int main(int argc, char* argv[])
{
//At the end, we report the time taken.
auto timeStart = boost::chrono::high_resolution_clock::now();
//Just a reminder.
fprintf(stderr, "Note: Initialising ndlab/test/ResultManager (%d arguments).\n", argc);
//Otherwise, it will throw errors.
if (argc != 3)
{
fprintf(stderr, "You have to give in N, the number of images (multiple of 4) and \n\t gpu or cpu.\n");
return 0;
}
//Number of images, which are inputted as if there are 4 beads (which really doesn't matter).
const int N = (int)atoi(argv[1]); //4 beads, 24 images; 6 frames per image?
//Always report back.
fprintf(stderr, "Testing ResultManager with %d images. %s \n", N, argv[1]);
//Where to safe data/frameinfo.
const char* file = "./ResultManagerData.txt";
const char * frameinfo = "./ResultManagerFrameInfo.txt";
// Better to use modern smart pointers. Config settings are largely taken from older tests.
std::shared_ptr<ResultManagerConfig> cfg = std::make_shared<ResultManagerConfig>();
cfg->numBeads = 4;
cfg->numFrameInfoColumns = 0;
cfg->scaling = vector3f(1.0f, 1.0f, 1.0f);
cfg->offset = vector3f(0.0f, 0.0f, 0.0f);
cfg->writeInterval = 2500;
cfg->maxFramesInMemory = 0;// 100000;
cfg->binaryOutput = false;
std::vector< std::string > colNames;
std::string testName("Hey now");
colNames.push_back(testName);
fprintf(stderr, "Allocating Result Manager now. \n");
// The manager is an instance of the Result Manager class. Surprise.
std::shared_ptr<ResultManager> manager = std::make_shared<ResultManager>(file, frameinfo, cfg.get(), colNames);
//The QueuedCPUTracker instance is required to retrieve the results. It needs settings.
QTrkComputedConfig settings;
settings.qi_iterations = 5;
settings.zlut_minradius = 1;
settings.qi_minradius = 1;
settings.width = settings.height = 100;
settings.Update();
//Experimental file to use for test.
std::string fileName = "./exp.jpg";
//Make sure the file exists; otherwise, it will throw a really weird error.
bool fileExists = boost::filesystem::exists(fileName);
if (!fileExists)
{
fprintf(stderr, "File %s not found; is it in the directory of the executable?\n\n", fileName.c_str());
return 0;
}
//Let's load some image data.
auto data = ReadJPEGFile(fileName.c_str());
std::shared_ptr<QueuedTracker> qtrk;
//Make sure we are using the right tracker (as requested)
if (argc == 3)
{
std::string argTracker = std::string(argv[2]);
if (argTracker == "gpu")
{
fprintf(stderr, "Using CUDA tracker (GPU).\n");
std::shared_ptr<QueuedCUDATracker> cudaTracker = std::make_shared<QueuedCUDATracker>(settings);
cudaTracker->EnableTextureCache(true);
qtrk = cudaTracker;
}
else if(argTracker == "cpu")
{
fprintf(stderr, "Using CPU tracker (CPU).\n");
qtrk = std::make_shared<QueuedCPUTracker>(settings);
}
else
{
fprintf(stderr, "No tracker specified. Choose either cpu or gpu.\n");
return 0;
}
}
else
{
fprintf(stderr, "Faulty arguments. Your mother was a hamster, %d th of her name.", argc);
return 0;
}
//localization Mode QI tracker
auto modeQI = (LocMode_t)(LT_QI | LT_NormalizeProfile | LT_LocalizeZ);
qtrk->SetLocalizationMode(modeQI);
//Make the calculations
std::vector<LocalizationJob> jobs;
int frame = 0;
for (int i = 0; i < N; i++)
{
if (i % 4 == 0 && i != 0)
{
frame++;
}
//Make a localization job (batch of calculations)
LocalizationJob job(frame, 0, 0, 0);
job.zlutIndex = i % 4; //actually, frame number
jobs.push_back(job);
qtrk->ScheduleImageData(&data, &job);
}
manager->SetTracker(qtrk.get());
//Process images (using Flush because Start is CPU only)
qtrk->Flush();
//Report progress
int i = 0;
while (manager->GetFrameCounters().localizationsDone < N)
{
if (i > 100000)
{
auto counters = manager->GetFrameCounters();
fprintf(stderr, "Update: %d Localisations performed.\n", counters.localizationsDone);
i = 0;
}
i++;
}
//Assign frame
float somefloat = 0.0;
for (int i = 0; i < N / 4; i++)
{
manager->StoreFrameInfo(i, i, &somefloat);
}
//Pointer that will be filled with results
std::vector<LocalizationResult> results;
vector3f startPosition(0.0f, 0.0f, 0.0f);
vector2f initialGuess(45.0f, 50.0f);
//Allocate the 'array' that will hold the results.
for (int i = 0; i < N; i++)
{
LocalizationResult currentResult;
currentResult.job = jobs.at(i);
currentResult.pos = startPosition;
currentResult.firstGuess = initialGuess;
currentResult.error = 0;
currentResult.imageMean = 0.0f;
results.push_back(currentResult);
}
//Fill results array
manager->Flush();
//Wait untill all localizations have been performed.
i = 0;
while (manager->GetFrameCounters().lastSaveFrame < N / 4)
{
if (i > 100000)
{
auto counters = manager->GetFrameCounters();
//BUGFIX: 'i' is an int, so print it with %d (was %.3f: undefined behavior)
fprintf(stderr, "Update[%d]: %d frames saved.\n", i, counters.lastSaveFrame);
i = 0; //BUGFIX: reset the poll counter so the message repeats periodically
}
i++; //BUGFIX: 'i' was never incremented, so progress was never reported
}
//Get the results
auto counters = manager->GetFrameCounters();
auto getResults = manager->GetResults(results.data(), 0, N / cfg->numBeads);
//Report results
fprintf(stderr, "ResultManager results (%d) :\n", getResults);
fprintf(stderr, "\t frame\t bead\t x\t y\t z\n");
//Show at most the last 25 results.
//BUGFIX: results.size() is unsigned, so 'size() - 25' underflowed for N < 25
//and the loop printed nothing; compare before subtracting instead.
size_t firstShown = results.size() > 25 ? results.size() - 25 : 0;
for (size_t i = firstShown; i < results.size(); i++)
{
auto result = results[i];
fprintf(stderr, "\t%d\t%d\t%.3f\t%.3f\t%.3f\n", result.job.frame, (int)(i % 4), result.pos.x, result.pos.y, result.pos.z);
}
//Report final information
printf("Frame counters:\n\t Started at %d, processed %d, finished on %d\n", counters.startFrame, counters.processedFrames, counters.lastSaveFrame);
printf("\tCaptured %d, localizations %d, lostFrames %d, file error %d.\n", counters.capturedFrames, counters.localizationsDone, counters.lostFrames, counters.fileError);
//report time, end program
auto timeEnd = boost::chrono::high_resolution_clock::now();
auto microSeconds = boost::chrono::duration_cast<boost::chrono::microseconds>(timeEnd - timeStart).count();
fprintf(stderr, "Note: Elapsed time %ld microseconds. \n", (long)microSeconds); //cast: count() may be long long
return 0;
}
|
445323fb5ebe682240de038154d62f500fcef561.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zmdot_shfl.cu, normal z -> s, Sun Nov 20 20:20:40 2016
@author Moritz Kreutzer
*/
#include "magmasparse_internal.h"
#include "magmasparse_s.h"
#define BLOCK_SIZE 512
#define PRECISION_s
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION <= 6000)
// CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier.
// from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
// Double-precision __shfl_down emulation for pre-6.5 toolkits: shuffle the
// two 32-bit halves separately and reassemble.
// Fix: the shuffled bits must be reinterpreted back as real_Double_t -- the
// previous reinterpret_cast<float*> read only 4 of the 8 bytes and implicitly
// converted a garbage float to the return type.
__device__ inline
real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) {
int2 a = *reinterpret_cast<int2*>(&var);
a.x = __shfl_down(a.x, srcLane, width);
a.y = __shfl_down(a.y, srcLane, width);
return *reinterpret_cast<real_Double_t*>(&a);
}
#endif
// Warp-level sum via a shuffle-down tree (offsets 16,8,4,2,1): after the call
// lane 0 holds the sum over all 32 lanes (other lanes hold partial sums).
// Requires sm_30+; on older architectures the value is returned unreduced.
template<typename T>
__inline__ __device__
T warpReduceSum(T val)
{
#if __CUDA_ARCH__ >= 300
val += __shfl_down(val, 16);
val += __shfl_down(val, 8);
val += __shfl_down(val, 4);
val += __shfl_down(val, 2);
val += __shfl_down(val, 1);
#endif
return val;
}
#ifdef PRECISION_z
// Specialization generated from the PRECISION_z (double-complex) variant;
// compiled out in this single-precision build (PRECISION_s, not PRECISION_z).
// NOTE(review): even if enabled this looks broken -- it reinterprets a 4-byte
// float as a 16-byte int4 (out-of-bounds read) and the shuffled sums stay in
// the local copy `a`, so `val` is returned unmodified. Verify against the
// original zmdot_shfl.cu before ever enabling this path.
template<>
__inline__ __device__
float warpReduceSum<float>(float val)
{
#if __CUDA_ARCH__ >= 300
int4 a = *reinterpret_cast<int4*>(&val);
a.x += __shfl_down(a.x, 16);
a.y += __shfl_down(a.y, 16);
a.z += __shfl_down(a.z, 16);
a.w += __shfl_down(a.w, 16);
a.x += __shfl_down(a.x, 8);
a.y += __shfl_down(a.y, 8);
a.z += __shfl_down(a.z, 8);
a.w += __shfl_down(a.w, 8);
a.x += __shfl_down(a.x, 4);
a.y += __shfl_down(a.y, 4);
a.z += __shfl_down(a.z, 4);
a.w += __shfl_down(a.w, 4);
a.x += __shfl_down(a.x, 2);
a.y += __shfl_down(a.y, 2);
a.z += __shfl_down(a.z, 2);
a.w += __shfl_down(a.w, 2);
a.x += __shfl_down(a.x, 1);
a.y += __shfl_down(a.y, 1);
a.z += __shfl_down(a.z, 1);
a.w += __shfl_down(a.w, 1);
#endif
return val;
}
#endif // PRECISION_z
#ifdef PRECISION_c
// Complex-float specialization; compiled out in this build (PRECISION_s is
// defined, not PRECISION_c). NOTE(review): the component sums accumulate in
// the local copy `a` and are never written back to `val`, which is returned
// unchanged -- confirm against the original source if this path is enabled.
template<>
__inline__ __device__
magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val)
{
#if __CUDA_ARCH__ >= 300
float2 a = *reinterpret_cast<float2*>(&val);
a.x += __shfl_down(a.x, 16);
a.y += __shfl_down(a.y, 16);
a.x += __shfl_down(a.x, 8);
a.y += __shfl_down(a.y, 8);
a.x += __shfl_down(a.x, 4);
a.y += __shfl_down(a.y, 4);
a.x += __shfl_down(a.x, 2);
a.y += __shfl_down(a.y, 2);
a.x += __shfl_down(a.x, 1);
a.y += __shfl_down(a.y, 1);
#endif
return val;
}
#endif // PRECISION_c
// Block-wide sum for 1-D thread blocks: each warp reduces via shuffles, lane 0
// of every warp stages its partial in dynamic shared memory, then the first
// warp reduces the partials. Thread 0's return value is the block total.
// Caller must launch with at least (blockDim.x/warpSize) elements of dynamic
// shared memory. NOTE(review): extern __shared__ inside a template can clash
// if instantiated for several types in one kernel -- confirm single-type use.
template<typename T>
__inline__ __device__
T blockReduceSum_1D(T val)
{
extern __shared__ T shared[]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum<T>(val); // Each warp performs partial reduction
if (lane == 0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_S_ZERO;
if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp
return val;
}
template<typename T>
__inline__ __device__
T blockReduceSum(T val)
{
extern __shared__ T shared[]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum<T>(val); // Each warp performs partial reduction
if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_S_ZERO;
if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp
return val;
}
template<typename T>
__global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N)
{
T sum = MAGMA_S_MAKE(0.0, 0.0);
//reduce multiple elements per thread
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = blockReduceSum<T>(sum);
if (threadIdx.x == 0)
out[blockIdx.x]=sum;
}
// dot product for multiple vectors using shuffle intrinsics and less shared memory
__global__ void
magma_sblockdot_kernel_shuffle(
int n,
int k,
const float * __restrict__ v,
const float * __restrict__ r,
float * __restrict__ vtmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = threadIdx.y;
float tmp;
if (i < n) {
tmp = v[i+j*n] * r[i];
} else {
tmp = MAGMA_S_ZERO;
}
tmp = blockReduceSum(tmp);
if (threadIdx.x == 0 ){
vtmp[ blockIdx.x+j*gridDim.x ] = tmp;
}
}
// dot product for multiple vectors using shuffle intrinsics and less shared memory
__global__ void
magma_sblockdot_kernel_shuffle_1dblock(
int n,
int k,
const float * __restrict__ v,
const float * __restrict__ r,
float * __restrict__ vtmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j;
for (j=0; j < k; j++) {
float tmp;
if (i < n) {
tmp = v[i+j*n] * r[i];
} else {
tmp = MAGMA_S_ZERO;
}
tmp = blockReduceSum_1D(tmp);
if (threadIdx.x == 0 ){
vtmp[ blockIdx.x+j*gridDim.x ] = tmp;
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc_shfl(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
if ( magma_getdevice_arch() < 300 ) {
return magma_smdotc( n, k, v, r, d1, d2, skp, queue );
}
else if (1) { // 1D block kernel seems to be always faster
dim3 block( BLOCK_SIZE );
dim3 grid( magma_ceildiv( n, block.x ) );
hipLaunchKernelGGL(( magma_sblockdot_kernel_shuffle_1dblock), dim3(grid), dim3(block), 32*sizeof(float), queue->cuda_stream() , n, k, v, r, d1 );
int j;
for (j=0; j < k; j++) {
hipLaunchKernelGGL(( deviceReduceKernel<float>) , dim3(1), dim3(1024), 32*sizeof(float), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x);
}
} else {
dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k );
while (block.x*block.y > 1024) {
block.x -= 32;
}
dim3 grid( magma_ceildiv( n, block.x ) );
hipLaunchKernelGGL(( magma_sblockdot_kernel_shuffle), dim3(grid), dim3(block), 32*k*sizeof(float), queue->cuda_stream() , n, k, v, r, d1 );
int j;
for (j=0; j < k; j++) {
hipLaunchKernelGGL(( deviceReduceKernel<float>) , dim3(1), dim3(1024), 32*sizeof(float), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x);
}
}
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sgemvmdot_shfl(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
if (k == 1) { // call CUBLAS dotc, we will never be faster
float res = magma_sdot( n, v, 1, r, 1, queue );
magma_ssetvector( 1, &res, 1, skp, 1, queue );
}
else if ( magma_getdevice_arch() < 300 ) {
return magma_sgemvmdot( n, k, v, r, d1, d2, skp, queue );
}
else {
magma_smdotc_shfl( n, k, v, r, d1, d2, skp, queue );
}
return MAGMA_SUCCESS;
}
| 445323fb5ebe682240de038154d62f500fcef561.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zmdot_shfl.cu, normal z -> s, Sun Nov 20 20:20:40 2016
@author Moritz Kreutzer
*/
#include "magmasparse_internal.h"
#include "magmasparse_s.h"
#define BLOCK_SIZE 512
#define PRECISION_s
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION <= 6000)
// CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier.
// from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
__device__ inline
real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) {
int2 a = *reinterpret_cast<int2*>(&var);
a.x = __shfl_down(a.x, srcLane, width);
a.y = __shfl_down(a.y, srcLane, width);
return *reinterpret_cast<float*>(&a);
}
#endif
template<typename T>
__inline__ __device__
T warpReduceSum(T val)
{
#if __CUDA_ARCH__ >= 300
val += __shfl_down(val, 16);
val += __shfl_down(val, 8);
val += __shfl_down(val, 4);
val += __shfl_down(val, 2);
val += __shfl_down(val, 1);
#endif
return val;
}
#ifdef PRECISION_z
template<>
__inline__ __device__
float warpReduceSum<float>(float val)
{
#if __CUDA_ARCH__ >= 300
int4 a = *reinterpret_cast<int4*>(&val);
a.x += __shfl_down(a.x, 16);
a.y += __shfl_down(a.y, 16);
a.z += __shfl_down(a.z, 16);
a.w += __shfl_down(a.w, 16);
a.x += __shfl_down(a.x, 8);
a.y += __shfl_down(a.y, 8);
a.z += __shfl_down(a.z, 8);
a.w += __shfl_down(a.w, 8);
a.x += __shfl_down(a.x, 4);
a.y += __shfl_down(a.y, 4);
a.z += __shfl_down(a.z, 4);
a.w += __shfl_down(a.w, 4);
a.x += __shfl_down(a.x, 2);
a.y += __shfl_down(a.y, 2);
a.z += __shfl_down(a.z, 2);
a.w += __shfl_down(a.w, 2);
a.x += __shfl_down(a.x, 1);
a.y += __shfl_down(a.y, 1);
a.z += __shfl_down(a.z, 1);
a.w += __shfl_down(a.w, 1);
#endif
return val;
}
#endif // PRECISION_z
#ifdef PRECISION_c
template<>
__inline__ __device__
magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val)
{
#if __CUDA_ARCH__ >= 300
float2 a = *reinterpret_cast<float2*>(&val);
a.x += __shfl_down(a.x, 16);
a.y += __shfl_down(a.y, 16);
a.x += __shfl_down(a.x, 8);
a.y += __shfl_down(a.y, 8);
a.x += __shfl_down(a.x, 4);
a.y += __shfl_down(a.y, 4);
a.x += __shfl_down(a.x, 2);
a.y += __shfl_down(a.y, 2);
a.x += __shfl_down(a.x, 1);
a.y += __shfl_down(a.y, 1);
#endif
return val;
}
#endif // PRECISION_c
template<typename T>
__inline__ __device__
T blockReduceSum_1D(T val)
{
extern __shared__ T shared[]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum<T>(val); // Each warp performs partial reduction
if (lane == 0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_S_ZERO;
if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp
return val;
}
template<typename T>
__inline__ __device__
T blockReduceSum(T val)
{
extern __shared__ T shared[]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum<T>(val); // Each warp performs partial reduction
if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_S_ZERO;
if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp
return val;
}
template<typename T>
__global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N)
{
T sum = MAGMA_S_MAKE(0.0, 0.0);
//reduce multiple elements per thread
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = blockReduceSum<T>(sum);
if (threadIdx.x == 0)
out[blockIdx.x]=sum;
}
// dot product for multiple vectors using shuffle intrinsics and less shared memory
__global__ void
magma_sblockdot_kernel_shuffle(
int n,
int k,
const float * __restrict__ v,
const float * __restrict__ r,
float * __restrict__ vtmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = threadIdx.y;
float tmp;
if (i < n) {
tmp = v[i+j*n] * r[i];
} else {
tmp = MAGMA_S_ZERO;
}
tmp = blockReduceSum(tmp);
if (threadIdx.x == 0 ){
vtmp[ blockIdx.x+j*gridDim.x ] = tmp;
}
}
// dot product for multiple vectors using shuffle intrinsics and less shared memory
__global__ void
magma_sblockdot_kernel_shuffle_1dblock(
int n,
int k,
const float * __restrict__ v,
const float * __restrict__ r,
float * __restrict__ vtmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j;
for (j=0; j < k; j++) {
float tmp;
if (i < n) {
tmp = v[i+j*n] * r[i];
} else {
tmp = MAGMA_S_ZERO;
}
tmp = blockReduceSum_1D(tmp);
if (threadIdx.x == 0 ){
vtmp[ blockIdx.x+j*gridDim.x ] = tmp;
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc_shfl(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
if ( magma_getdevice_arch() < 300 ) {
return magma_smdotc( n, k, v, r, d1, d2, skp, queue );
}
else if (1) { // 1D block kernel seems to be always faster
dim3 block( BLOCK_SIZE );
dim3 grid( magma_ceildiv( n, block.x ) );
magma_sblockdot_kernel_shuffle_1dblock<<< grid, block, 32*sizeof(float), queue->cuda_stream() >>>( n, k, v, r, d1 );
int j;
for (j=0; j < k; j++) {
deviceReduceKernel<float> <<<1, 1024, 32*sizeof(float), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x);
}
} else {
dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k );
while (block.x*block.y > 1024) {
block.x -= 32;
}
dim3 grid( magma_ceildiv( n, block.x ) );
magma_sblockdot_kernel_shuffle<<< grid, block, 32*k*sizeof(float), queue->cuda_stream() >>>( n, k, v, r, d1 );
int j;
for (j=0; j < k; j++) {
deviceReduceKernel<float> <<<1, 1024, 32*sizeof(float), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x);
}
}
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sgemvmdot_shfl(
magma_int_t n,
magma_int_t k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
if (k == 1) { // call CUBLAS dotc, we will never be faster
float res = magma_sdot( n, v, 1, r, 1, queue );
magma_ssetvector( 1, &res, 1, skp, 1, queue );
}
else if ( magma_getdevice_arch() < 300 ) {
return magma_sgemvmdot( n, k, v, r, d1, d2, skp, queue );
}
else {
magma_smdotc_shfl( n, k, v, r, d1, d2, skp, queue );
}
return MAGMA_SUCCESS;
}
|
4c1b498282e43bc2a22f4020f543b71f6dfd381d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/arg_where_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/common/fixed_vector.h"
#include <hipcub/hipcub.hpp>
namespace oneflow {
namespace {
constexpr int kFlatIndexToNdIndexProposedLaunchBlocks = 128;
template<typename T, size_t NDims>
struct StrideIterator {
typedef StrideIterator self_type;
typedef std::ptrdiff_t difference_type;
typedef T value_type;
typedef T* pointer;
typedef T& reference;
typedef std::random_access_iterator_tag iterator_category;
explicit StrideIterator(T* ptr, size_t max_iters) : ptr_(ptr), max_iters_(max_iters) {}
OF_DEVICE_FUNC reference operator[](int i) {
assert(0 <= i && i < max_iters_);
return *(ptr_ + (i * NDims));
}
private:
T* ptr_;
size_t max_iters_;
};
template<typename T, size_t NDims>
__global__ void CudaOffsetToNdIndexInplace(NdIndexOffsetHelper<T, NDims> index_converter,
const T* num_indices_ptr, T* indices_ptr) {
CUDA_1D_KERNEL_LOOP_T(T, i, *num_indices_ptr) {
T* cur_indices_ptr = indices_ptr + i * NDims;
index_converter.OffsetToNdIndex(*cur_indices_ptr, cur_indices_ptr);
}
}
template<typename T>
struct IsTrue {
OF_DEVICE_FUNC bool operator()(const T& val) const { return static_cast<bool>(val); }
};
template<typename T, typename I, typename Iter>
hipError_t SelectTrue(hipStream_t stream, int num_items, void* tmp, size_t& tmp_bytes,
const T* flags, Iter out_iter, I* num_selected) {
IsTrue<T> is_true;
hipcub::TransformInputIterator<bool, IsTrue<T>, const T*> flag_iter(flags, is_true);
hipcub::CountingInputIterator<I> offset_counter(0);
return hipcub::DeviceSelect::Flagged(tmp, tmp_bytes, offset_counter, flag_iter, out_iter,
num_selected, num_items, stream, false);
}
} // namespace
template<typename T, typename I, size_t NDims>
struct ArgWhereKernelUtil<DeviceType::kGPU, T, I, NDims> {
static void ArgWhere(DeviceCtx* ctx, const ShapeView& in_shape, const T* in_ptr, void* tmp,
size_t tmp_max_bytes, I* out_ptr, I* out_size_ptr) {
CHECK_NOTNULL(ctx);
CHECK_LE(in_shape.elem_cnt(), std::numeric_limits<I>::max());
size_t tmp_bytes = GetArgWhereWorkspaceSizeInBytes(ctx, in_shape.elem_cnt());
CHECK_LE(tmp_bytes, tmp_max_bytes);
if (NDims == 1) {
OF_CUDA_CHECK((SelectTrue<T, I, I*>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes,
in_ptr, out_ptr, out_size_ptr)));
} else {
StrideIterator<I, NDims> out_iter(out_ptr, in_shape.elem_cnt());
OF_CUDA_CHECK(
(SelectTrue<T, I, StrideIterator<I, NDims>>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp,
tmp_bytes, in_ptr, out_iter, out_size_ptr)));
fixed_vector<I, NDims> dims(NDims);
std::transform(in_shape.ptr(), in_shape.ptr() + in_shape.NumAxes(), dims.begin(),
[](int64_t dim) { return static_cast<I>(dim); });
NdIndexOffsetHelper<I, NDims> index_converter(dims.data(), dims.size());
hipLaunchKernelGGL(( CudaOffsetToNdIndexInplace<I, NDims>)
, dim3(kFlatIndexToNdIndexProposedLaunchBlocks), dim3(kCudaThreadsNumPerBlock), 0,
ctx->cuda_stream(), index_converter, out_size_ptr, out_ptr);
}
}
static size_t GetArgWhereWorkspaceSizeInBytes(DeviceCtx* ctx, int64_t n) {
hipStream_t stream = ctx ? ctx->cuda_stream() : 0;
size_t tmp_bytes = 0;
if (NDims == 1) {
OF_CUDA_CHECK(
(SelectTrue<T, I, I*>(stream, n, nullptr, tmp_bytes, nullptr, nullptr, nullptr)));
} else {
StrideIterator<I, NDims> out_iter(nullptr, n);
OF_CUDA_CHECK((SelectTrue<T, I, StrideIterator<I, NDims>>(stream, n, nullptr, tmp_bytes,
nullptr, out_iter, nullptr)));
}
return tmp_bytes;
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ARG_WHERE_KERNEL_UTIL, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace oneflow
| 4c1b498282e43bc2a22f4020f543b71f6dfd381d.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/arg_where_kernel_util.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/common/fixed_vector.h"
#include <cub/cub.cuh>
namespace oneflow {
namespace {
constexpr int kFlatIndexToNdIndexProposedLaunchBlocks = 128;
template<typename T, size_t NDims>
struct StrideIterator {
typedef StrideIterator self_type;
typedef std::ptrdiff_t difference_type;
typedef T value_type;
typedef T* pointer;
typedef T& reference;
typedef std::random_access_iterator_tag iterator_category;
explicit StrideIterator(T* ptr, size_t max_iters) : ptr_(ptr), max_iters_(max_iters) {}
OF_DEVICE_FUNC reference operator[](int i) {
assert(0 <= i && i < max_iters_);
return *(ptr_ + (i * NDims));
}
private:
T* ptr_;
size_t max_iters_;
};
template<typename T, size_t NDims>
__global__ void CudaOffsetToNdIndexInplace(NdIndexOffsetHelper<T, NDims> index_converter,
const T* num_indices_ptr, T* indices_ptr) {
CUDA_1D_KERNEL_LOOP_T(T, i, *num_indices_ptr) {
T* cur_indices_ptr = indices_ptr + i * NDims;
index_converter.OffsetToNdIndex(*cur_indices_ptr, cur_indices_ptr);
}
}
template<typename T>
struct IsTrue {
OF_DEVICE_FUNC bool operator()(const T& val) const { return static_cast<bool>(val); }
};
template<typename T, typename I, typename Iter>
cudaError_t SelectTrue(cudaStream_t stream, int num_items, void* tmp, size_t& tmp_bytes,
const T* flags, Iter out_iter, I* num_selected) {
IsTrue<T> is_true;
cub::TransformInputIterator<bool, IsTrue<T>, const T*> flag_iter(flags, is_true);
cub::CountingInputIterator<I> offset_counter(0);
return cub::DeviceSelect::Flagged(tmp, tmp_bytes, offset_counter, flag_iter, out_iter,
num_selected, num_items, stream, false);
}
} // namespace
template<typename T, typename I, size_t NDims>
struct ArgWhereKernelUtil<DeviceType::kGPU, T, I, NDims> {
static void ArgWhere(DeviceCtx* ctx, const ShapeView& in_shape, const T* in_ptr, void* tmp,
size_t tmp_max_bytes, I* out_ptr, I* out_size_ptr) {
CHECK_NOTNULL(ctx);
CHECK_LE(in_shape.elem_cnt(), std::numeric_limits<I>::max());
size_t tmp_bytes = GetArgWhereWorkspaceSizeInBytes(ctx, in_shape.elem_cnt());
CHECK_LE(tmp_bytes, tmp_max_bytes);
if (NDims == 1) {
OF_CUDA_CHECK((SelectTrue<T, I, I*>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes,
in_ptr, out_ptr, out_size_ptr)));
} else {
StrideIterator<I, NDims> out_iter(out_ptr, in_shape.elem_cnt());
OF_CUDA_CHECK(
(SelectTrue<T, I, StrideIterator<I, NDims>>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp,
tmp_bytes, in_ptr, out_iter, out_size_ptr)));
fixed_vector<I, NDims> dims(NDims);
std::transform(in_shape.ptr(), in_shape.ptr() + in_shape.NumAxes(), dims.begin(),
[](int64_t dim) { return static_cast<I>(dim); });
NdIndexOffsetHelper<I, NDims> index_converter(dims.data(), dims.size());
CudaOffsetToNdIndexInplace<I, NDims>
<<<kFlatIndexToNdIndexProposedLaunchBlocks, kCudaThreadsNumPerBlock, 0,
ctx->cuda_stream()>>>(index_converter, out_size_ptr, out_ptr);
}
}
static size_t GetArgWhereWorkspaceSizeInBytes(DeviceCtx* ctx, int64_t n) {
cudaStream_t stream = ctx ? ctx->cuda_stream() : 0;
size_t tmp_bytes = 0;
if (NDims == 1) {
OF_CUDA_CHECK(
(SelectTrue<T, I, I*>(stream, n, nullptr, tmp_bytes, nullptr, nullptr, nullptr)));
} else {
StrideIterator<I, NDims> out_iter(nullptr, n);
OF_CUDA_CHECK((SelectTrue<T, I, StrideIterator<I, NDims>>(stream, n, nullptr, tmp_bytes,
nullptr, out_iter, nullptr)));
}
return tmp_bytes;
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ARG_WHERE_KERNEL_UTIL, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
} // namespace oneflow
|
1f2a1b698ab517d65f61b3be047ad30966b30612.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
size_t ix = threadIdx.x + blockIdx.x * blockDim.x;
size_t iy = threadIdx.y + blockIdx.y * blockDim.y;
// make sure it does not overflow
if (ix < numRows && iy < numCols) {
size_t idx = ix + iy * numRows;
float r = rgbaImage[idx].x;
float g = rgbaImage[idx].y;
float b = rgbaImage[idx].z;
float i = 0.299 * r + 0.587 * g + 0.114 * b;
greyImage[idx] = i;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
int h_nblock = 16;
int h_ngridX = 0;
int h_ngridY = 0;
if (numRows % h_nblock) {
h_ngridX = numRows / h_nblock + 1;
} else {
h_ngridX = numRows / h_nblock;
}
if (numCols % h_nblock) {
h_ngridY = numCols / h_nblock + 1;
} else {
h_ngridY = numCols / h_nblock;
}
printf("ngridX: %d, ngridY: %d", h_ngridX, h_ngridY);
const dim3 blockSize(h_nblock, h_nblock, 1); //TODO
const dim3 gridSize(h_ngridX, h_ngridY, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 1f2a1b698ab517d65f61b3be047ad30966b30612.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
size_t ix = threadIdx.x + blockIdx.x * blockDim.x;
size_t iy = threadIdx.y + blockIdx.y * blockDim.y;
// make sure it does not overflow
if (ix < numRows && iy < numCols) {
size_t idx = ix + iy * numRows;
float r = rgbaImage[idx].x;
float g = rgbaImage[idx].y;
float b = rgbaImage[idx].z;
float i = 0.299 * r + 0.587 * g + 0.114 * b;
greyImage[idx] = i;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
int h_nblock = 16;
int h_ngridX = 0;
int h_ngridY = 0;
if (numRows % h_nblock) {
h_ngridX = numRows / h_nblock + 1;
} else {
h_ngridX = numRows / h_nblock;
}
if (numCols % h_nblock) {
h_ngridY = numCols / h_nblock + 1;
} else {
h_ngridY = numCols / h_nblock;
}
printf("ngridX: %d, ngridY: %d", h_ngridX, h_ngridY);
const dim3 blockSize(h_nblock, h_nblock, 1); //TODO
const dim3 gridSize(h_ngridX, h_ngridY, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
9bf5db17175113a68299c8598472e7e67e7f8e43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void vec_add(float *A, float *B, float *C, int N){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= N) {return;}
C[i] = A[i] + B[i];
}
void main(){
int N = 200
float *A_h = new float[N];
float *B_h = new float[N];
float *C_h = new float[N];
for(int i0; i<N; i++){
A_h[i] = 1.3f;
B_h[i] = 2.0f;
}
float *A_d, *B_d, *C_d;
hipMalloc( (void**) &A_d, N * sizeof(float));
hipMalloc( (void**) &B_d, N * sizeof(float));
hipMalloc( (void**) &C_d, N * sizeof(float));
hipMemcpy(A_d, A_h, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, N*sizeof(float), hipMemcpyHostToDevice);
int blocks = int(N-0.5)/256 + 1;
hipLaunchKernelGGL(( vec_add), dim3(blocks), dim3(256), 0, 0, A_d, B_d, C_d, N);
hipMemcpy(C_h, C_d, N*sizeof(float), hipMemcpyDeviceToHost)
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
}
| 9bf5db17175113a68299c8598472e7e67e7f8e43.cu | #include <stdio.h>
__global__ void vec_add(float *A, float *B, float *C, int N){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= N) {return;}
C[i] = A[i] + B[i];
}
void main(){
int N = 200
float *A_h = new float[N];
float *B_h = new float[N];
float *C_h = new float[N];
for(int i0; i<N; i++){
A_h[i] = 1.3f;
B_h[i] = 2.0f;
}
float *A_d, *B_d, *C_d;
cudaMalloc( (void**) &A_d, N * sizeof(float));
cudaMalloc( (void**) &B_d, N * sizeof(float));
cudaMalloc( (void**) &C_d, N * sizeof(float));
cudaMemcpy(A_d, A_h, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, N*sizeof(float), cudaMemcpyHostToDevice);
int blocks = int(N-0.5)/256 + 1;
vec_add<<<blocks, 256>>> (A_d, B_d, C_d, N);
cudaMemcpy(C_h, C_d, N*sizeof(float), cudaMemcpyDeviceToHost)
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
}
|
211084ddfc77623d129ccd34d58a013217571a6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Split(float * xf, bool * xb, size_t idxf, size_t idxb, size_t N, float threshold)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
xb[(idxb)*N+i] = (xf[(idxf-1)*N+i] < threshold);
}
return;
} | 211084ddfc77623d129ccd34d58a013217571a6a.cu | #include "includes.h"
__global__ void Split(float * xf, bool * xb, size_t idxf, size_t idxb, size_t N, float threshold)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
xb[(idxb)*N+i] = (xf[(idxf-1)*N+i] < threshold);
}
return;
} |
4c522bbb1b5c379372df35e75fe223d4c3fea8ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define NTHREADS 512
// Updates the column norms by subtracting the Hadamard-square of the
// Householder vector.
//
// N.B.: Overflow incurred in computing the square should already have
// been detected in the original norm construction.
__global__ void UpdateHHNorms(int cols, float *dV, float *dNorms) {
// Copyright 2009, Mark Seligman at Rapid Biologics, LLC. All rights
// reserved.
int colIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (colIndex < cols) {
float val = dV[colIndex];
dNorms[colIndex] -= val * val;
}
} | 4c522bbb1b5c379372df35e75fe223d4c3fea8ac.cu | #include "includes.h"
#define NTHREADS 512
// Updates the column norms by subtracting the Hadamard-square of the
// Householder vector.
//
// N.B.: Overflow incurred in computing the square should already have
// been detected in the original norm construction.
__global__ void UpdateHHNorms(int cols, float *dV, float *dNorms) {
// Copyright 2009, Mark Seligman at Rapid Biologics, LLC. All rights
// reserved.
// One thread per column; the grid must supply at least `cols` threads.
int colIndex = threadIdx.x + blockIdx.x * blockDim.x;
// Bounds guard: the grid tail may extend past the last column.
if (colIndex < cols) {
float val = dV[colIndex];
// Subtract the squared Householder component from this column's norm.
dNorms[colIndex] -= val * val;
}
} |
c7b178f6237fe56dc551c9314b164741a23d68fa.hip | // !!! This is a file automatically generated by hipify!!!
#pragma warning(disable:4819)
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Recursive Gaussian filter
sgreen 8/1/08
This code sample implements a Gaussian blur using Deriche's recursive method:
http://citeseer.ist.psu.edu/deriche93recursively.html
This is similar to the box filter sample in the SDK, but it uses the previous
outputs of the filter as well as the previous inputs. This is also known as an
IIR (infinite impulse response) filter, since its response to an input impulse
can last forever.
The main advantage of this method is that the execution time is independent of
the filter width.
The GPU processes columns of the image in parallel. To avoid uncoalesced reads
for the row pass we transpose the image and then transpose it back again
afterwards.
The implementation is based on code from the CImg library:
http://cimg.sourceforge.net/
Thanks to David Tschumperl and all the CImg contributors!
*/
// OpenGL Graphics includes
#include <helper_gl.h>
#if defined (__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#ifndef glutCloseFunc
#define glutCloseFunc glutWMCloseFunc
#endif
#else
#include <GL/freeglut.h>
#endif
// CUDA includes and interop headers
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// CUDA utilities and system includes
#include <helper_functions.h>
#include <helper_cuda.h> // includes cuda.h and hip/hip_runtime_api.h
#include <helper_cuda_gl.h> // includes hip/hip_runtime_api.h
#include <thrust/window_2d.h>
#include <thrust/block_2d.h>
#include <thrust/device_vector.h>
// Includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define MAX(a,b) ((a > b) ? a : b)
#define USE_SIMPLE_FILTER 0
#define MAX_EPSILON_ERROR 5.0f
#define THRESHOLD 0.15f
// Define the files that are to be save and the reference images for validation
const char *sOriginal[] =
{
"lena_10.ppm",
"lena_14.ppm",
"lena_18.ppm",
"lena_22.ppm",
NULL
};
const char *sReference[] =
{
"ref_10.ppm",
"ref_14.ppm",
"ref_18.ppm",
"ref_22.ppm",
NULL
};
const char *image_filename = "lena.ppm";
float sigma = 10.0f;
int order = 0;
int nthreads = 64; // number of threads per block
unsigned int width, height;
unsigned int *h_img = NULL;
unsigned int *d_img = NULL;
unsigned int *d_temp = NULL;
GLuint pbo = 0; // OpenGL pixel buffer object
GLuint texid = 0; // texture
StopWatchInterface *timer = 0;
// Auto-Verification Code
const int frameCheckNumber = 4;
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
unsigned int frameCount = 0;
int *pArgc = NULL;
char **pArgv = NULL;
bool runBenchmark = false;
const char *sSDKsample = "CUDA Recursive Gaussian";
extern "C"
void transpose(unsigned int *d_src, unsigned int *d_dest, unsigned int width, int height);
extern "C"
void gaussianFilterRGBA(unsigned int *d_src,thrust::block_2d<unsigned int> &block_d_input, unsigned int *d_dest,thrust::block_2d<unsigned int> &block_d_output, unsigned int *d_temp, int width, int height, float sigma, int order, int nthreads);
void cleanup();
// Frame-rate bookkeeping: every `fpsLimit` frames, derives the average FPS
// from the SDK timer, displays it (with the current sigma) in the window
// title, then resets the counters and timer.
void computeFPS()
{
frameCount++;
fpsCount++;
if (fpsCount == fpsLimit)
{
char fps[256];
// sdkGetAverageTimerValue returns milliseconds per frame; invert for fps.
float ifps = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f);
sprintf(fps, "%s (sigma=%4.2f): %3.1f fps", sSDKsample, sigma, ifps);
glutSetWindowTitle(fps);
fpsCount = 0;
// Re-arm so the title refreshes roughly once per second (>= every frame).
fpsLimit = ftoi(MAX(ifps, 1.f));
sdkResetTimer(&timer);
}
}
// display results using OpenGL
// GLUT display callback: runs the Gaussian filter into the GL pixel buffer
// object, uploads it to the display texture, and draws a full-screen quad.
void display()
{
sdkStartTimer(&timer);
// execute filter, writing results to pbo
unsigned int *d_result;
// Map the PBO so CUDA can write directly into GL-owned memory.
checkCudaErrors(hipGLMapBufferObject__((void **)&d_result, pbo));
// NOTE(review): these block_2d buffers are re-created every frame;
// presumably cheap, but hoisting them would avoid per-frame allocation.
thrust::block_2d<unsigned int> block_d_output (width,height,0);
thrust::block_2d<unsigned int> block_d_input (width,height,0);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
// Copy the filtered image from the block_2d buffer into the mapped PBO.
checkCudaErrors(hipMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),hipMemcpyDeviceToDevice));
checkCudaErrors(hipGLUnmapBufferObject(pbo));
// load texture from pbo
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBindTexture(GL_TEXTURE_2D, texid);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Source pointer 0 means "read from the currently bound PBO".
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// display results
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glDisable(GL_DEPTH_TEST);
// Full-viewport textured quad; texture coords flip the image vertically.
glBegin(GL_QUADS);
glTexCoord2f(0, 1);
glVertex2f(0, 0);
glTexCoord2f(1, 1);
glVertex2f(1, 0);
glTexCoord2f(1, 0);
glVertex2f(1, 1);
glTexCoord2f(0, 0);
glVertex2f(0, 1);
glEnd();
glDisable(GL_TEXTURE_2D);
glutSwapBuffers();
sdkStopTimer(&timer);
computeFPS();
}
// GLUT idle callback: keep requesting redraws so the filter re-runs
// continuously (drives the FPS measurement in display()).
void idle()
{
glutPostRedisplay();
}
// Releases all resources owned by this sample: SDK timer, device buffers,
// and (in interactive mode only) the GL pixel buffer object and texture.
void cleanup()
{
sdkDeleteTimer(&timer);
checkCudaErrors(hipFree(d_img));
checkCudaErrors(hipFree(d_temp));
// GL objects only exist when not running in benchmark mode.
if (!runBenchmark)
{
if (pbo)
{
// Unregister from CUDA before deleting the GL buffer.
checkCudaErrors(hipGLUnregisterBufferObject(pbo));
glDeleteBuffers(1, &pbo);
}
if (texid)
{
glDeleteTextures(1, &texid);
}
}
}
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
case 27:
#if defined(__APPLE__) || defined(MACOSX)
exit(EXIT_SUCCESS);
#else
glutDestroyWindow(glutGetWindow());
return;
#endif
break;
case '=':
sigma+=0.1f;
break;
case '-':
sigma-=0.1f;
if (sigma < 0.0)
{
sigma = 0.0f;
}
break;
case '+':
sigma+=1.0f;
break;
case '_':
sigma-=1.0f;
if (sigma < 0.0)
{
sigma = 0.0f;
}
break;
case '0':
order = 0;
break;
case '1':
order = 1;
sigma = 0.5f;
break;
case '2':
order = 2;
sigma = 0.5f;
break;
default:
break;
}
printf("sigma = %f\n", sigma);
glutPostRedisplay();
}
// GLUT reshape callback: resize the viewport and reset both matrices to a
// unit orthographic projection so the quad in display() fills the window.
void reshape(int x, int y)
{
glViewport(0, 0, x, y);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
}
// Allocates the device-side image and scratch buffers (width*height RGBA
// words), uploads the host image, and creates the benchmark timer.
// Preconditions: width, height and h_img are already set.
void initCudaBuffers()
{
unsigned int size = width * height * sizeof(unsigned int);
// allocate device memory
checkCudaErrors(hipMalloc((void **) &d_img, size));
checkCudaErrors(hipMalloc((void **) &d_temp, size));
checkCudaErrors(hipMemcpy(d_img, h_img, size, hipMemcpyHostToDevice));
sdkCreateTimer(&timer);
}
void initGLBuffers()
{
// create pixel buffer object to store final image
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, width*height*sizeof(GLubyte)*4, h_img, GL_STREAM_DRAW_ARB);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
checkCudaErrors(hipGLRegisterBufferObject(pbo));
// create texture for display
glGenTextures(1, &texid);
glBindTexture(GL_TEXTURE_2D, texid);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
}
void initGL(int *argc, char **argv)
{
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
glutInitWindowSize(width, height);
glutCreateWindow(sSDKsample);
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutReshapeFunc(reshape);
glutIdleFunc(idle);
#if defined (__APPLE__) || defined(MACOSX)
atexit(cleanup);
#else
glutCloseFunc(cleanup);
#endif
printf("Press '+' and '-' to change filter width\n");
printf("0, 1, 2 - change filter order\n");
if (!isGLVersionSupported(2,0) || !areGLExtensionsSupported("GL_ARB_vertex_buffer_object GL_ARB_pixel_buffer_object"))
{
fprintf(stderr, "Required OpenGL extensions missing.");
exit(EXIT_FAILURE);
}
}
// Benchmark mode: runs the Gaussian filter `iterations` times after a
// warm-up launch, then reports elapsed time and throughput in Mpixels/sec.
void
benchmark(int iterations)
{
// allocate memory for result
unsigned int *d_result;
unsigned int size = width * height * sizeof(unsigned int);
checkCudaErrors(hipMalloc((void **) &d_result, size));
thrust::block_2d<unsigned int> block_d_output (width,height,0);
thrust::block_2d<unsigned int> block_d_input (width,height,0);
// warm-up
printf("Dimension = %dx%d \n",width,height);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
checkCudaErrors(hipDeviceSynchronize());
sdkStartTimer(&timer);
// execute the kernel
for (int i = 0; i < iterations; i++)
{
gaussianFilterRGBA(d_img, block_d_input,d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
}
checkCudaErrors(hipMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),hipMemcpyDeviceToDevice));
// Synchronize before stopping the timer so all iterations are counted.
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&timer);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n", (width*height*iterations / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
checkCudaErrors(hipFree(d_result));
}
bool
runSingleTest(const char *ref_file, const char *exec_path)
{
// allocate memory for result
int nTotalErrors = 0;
unsigned int *d_result;
unsigned int size = width * height * sizeof(unsigned int);
checkCudaErrors(hipMalloc((void **) &d_result, size));
// warm-up
thrust::block_2d<unsigned int> block_d_output (width,height,0);
thrust::block_2d<unsigned int> block_d_input (width,height,0);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
checkCudaErrors(hipMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),hipMemcpyDeviceToDevice));
checkCudaErrors(hipDeviceSynchronize());
sdkStartTimer(&timer);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
checkCudaErrors(hipMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),hipMemcpyDeviceToDevice));
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("Kernel execution failed");
sdkStopTimer(&timer);
unsigned char *h_result = (unsigned char *)malloc(width*height*4);
checkCudaErrors(hipMemcpy(h_result, d_result, width*height*4, hipMemcpyDeviceToHost));
char dump_file[1024];
sprintf(dump_file, "lena_%02d.ppm", (int)sigma);
sdkSavePPM4ub(dump_file, h_result, width, height);
if (!sdkComparePPM(dump_file, sdkFindFilePath(ref_file, exec_path), MAX_EPSILON_ERROR, THRESHOLD, false))
{
nTotalErrors++;
}
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n", (width*height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
checkCudaErrors(hipFree(d_result));
free(h_result);
printf("Summary: %d errors!\n", nTotalErrors);
printf(nTotalErrors == 0 ? "Test passed\n": "Test failed!\n");
return (nTotalErrors == 0);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
char *ref_file = NULL;
#if defined(__linux__)
setenv ("DISPLAY", ":0", 0);
#endif
printf("%s Starting...\n\n", sSDKsample);
printf("NOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n");
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **)argv, "file"))
{
getCmdLineArgumentString(argc, (const char **)argv, "file", &ref_file);
fpsLimit = frameCheckNumber;
}
}
// Get the path of the filename
char *filename;
if (getCmdLineArgumentString(argc, (const char **) argv, "image", &filename))
{
image_filename = filename;
}
// load image
char *image_path = sdkFindFilePath(image_filename, argv[0]);
if (image_path == NULL)
{
fprintf(stderr, "Error unable to find and load image file: '%s'\n", image_filename);
exit(EXIT_FAILURE);
}
sdkLoadPPM4ub(image_path, (unsigned char **)&h_img, &width, &height);
if (!h_img)
{
printf("Error unable to load PPM file: '%s'\n", image_path);
exit(EXIT_FAILURE);
}
printf("Loaded '%s', %d x %d pixels\n", image_path, width, height);
if (checkCmdLineFlag(argc, (const char **)argv, "threads"))
{
nthreads = getCmdLineArgumentInt(argc, (const char **) argv, "threads");
}
if (checkCmdLineFlag(argc, (const char **)argv, "sigma"))
{
sigma = getCmdLineArgumentFloat(argc, (const char **) argv, "sigma");
}
runBenchmark = checkCmdLineFlag(argc, (const char **) argv, "benchmark");
int device;
struct hipDeviceProp_t prop;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
if (!strncmp("Tesla", prop.name, 5))
{
printf("Tesla card detected, running the test in benchmark mode (no OpenGL display)\n");
// runBenchmark = true;
runBenchmark = true;
}
// Benchmark or AutoTest mode detected, no OpenGL
if (runBenchmark == true || ref_file != NULL)
{
findCudaDevice(argc, (const char **)argv);
}
else
{
// First initialize OpenGL context, so we can properly set the GL for CUDA.
// This is necessary in order to achieve optimal performance with OpenGL/CUDA interop.
initGL(&argc, argv);
findCudaGLDevice(argc, (const char **)argv);
}
initCudaBuffers();
if (ref_file)
{
printf("(Automated Testing)\n");
bool testPassed = runSingleTest(ref_file, argv[0]);
cleanup();
exit(testPassed ? EXIT_SUCCESS : EXIT_FAILURE);
}
if (runBenchmark)
{
printf("(Run Benchmark)\n");
if(argc==3)
{
cleanup();
width = atoi(argv[2]);
height = atoi(argv[2]);
h_img = (unsigned int *)malloc(width*height*sizeof(unsigned int));
memset(h_img,213,sizeof(unsigned int)*width*height);
initCudaBuffers();
}
benchmark(100);
cleanup();
exit(EXIT_SUCCESS);
}
initGLBuffers();
glutMainLoop();
exit(EXIT_SUCCESS);
}
| c7b178f6237fe56dc551c9314b164741a23d68fa.cu | #pragma warning(disable:4819)
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Recursive Gaussian filter
sgreen 8/1/08
This code sample implements a Gaussian blur using Deriche's recursive method:
http://citeseer.ist.psu.edu/deriche93recursively.html
This is similar to the box filter sample in the SDK, but it uses the previous
outputs of the filter as well as the previous inputs. This is also known as an
IIR (infinite impulse response) filter, since its response to an input impulse
can last forever.
The main advantage of this method is that the execution time is independent of
the filter width.
The GPU processes columns of the image in parallel. To avoid uncoalesced reads
for the row pass we transpose the image and then transpose it back again
afterwards.
The implementation is based on code from the CImg library:
http://cimg.sourceforge.net/
Thanks to David Tschumperl� and all the CImg contributors!
*/
// OpenGL Graphics includes
#include <helper_gl.h>
#if defined (__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#ifndef glutCloseFunc
#define glutCloseFunc glutWMCloseFunc
#endif
#else
#include <GL/freeglut.h>
#endif
// CUDA includes and interop headers
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// CUDA utilities and system includes
#include <helper_functions.h>
#include <helper_cuda.h> // includes cuda.h and cuda_runtime_api.h
#include <helper_cuda_gl.h> // includes cuda_runtime_api.h
#include <thrust/window_2d.h>
#include <thrust/block_2d.h>
#include <thrust/device_vector.h>
// Includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define MAX(a,b) ((a > b) ? a : b)
#define USE_SIMPLE_FILTER 0
#define MAX_EPSILON_ERROR 5.0f
#define THRESHOLD 0.15f
// Define the files that are to be save and the reference images for validation
const char *sOriginal[] =
{
"lena_10.ppm",
"lena_14.ppm",
"lena_18.ppm",
"lena_22.ppm",
NULL
};
const char *sReference[] =
{
"ref_10.ppm",
"ref_14.ppm",
"ref_18.ppm",
"ref_22.ppm",
NULL
};
const char *image_filename = "lena.ppm";
float sigma = 10.0f;
int order = 0;
int nthreads = 64; // number of threads per block
unsigned int width, height;
unsigned int *h_img = NULL;
unsigned int *d_img = NULL;
unsigned int *d_temp = NULL;
GLuint pbo = 0; // OpenGL pixel buffer object
GLuint texid = 0; // texture
StopWatchInterface *timer = 0;
// Auto-Verification Code
const int frameCheckNumber = 4;
int fpsCount = 0; // FPS count for averaging
int fpsLimit = 1; // FPS limit for sampling
unsigned int frameCount = 0;
int *pArgc = NULL;
char **pArgv = NULL;
bool runBenchmark = false;
const char *sSDKsample = "CUDA Recursive Gaussian";
extern "C"
void transpose(unsigned int *d_src, unsigned int *d_dest, unsigned int width, int height);
extern "C"
void gaussianFilterRGBA(unsigned int *d_src,thrust::block_2d<unsigned int> &block_d_input, unsigned int *d_dest,thrust::block_2d<unsigned int> &block_d_output, unsigned int *d_temp, int width, int height, float sigma, int order, int nthreads);
void cleanup();
void computeFPS()
{
frameCount++;
fpsCount++;
if (fpsCount == fpsLimit)
{
char fps[256];
float ifps = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f);
sprintf(fps, "%s (sigma=%4.2f): %3.1f fps", sSDKsample, sigma, ifps);
glutSetWindowTitle(fps);
fpsCount = 0;
fpsLimit = ftoi(MAX(ifps, 1.f));
sdkResetTimer(&timer);
}
}
// display results using OpenGL
void display()
{
sdkStartTimer(&timer);
// execute filter, writing results to pbo
unsigned int *d_result;
checkCudaErrors(cudaGLMapBufferObject((void **)&d_result, pbo));
thrust::block_2d<unsigned int> block_d_output (width,height,0);
thrust::block_2d<unsigned int> block_d_input (width,height,0);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
checkCudaErrors(cudaMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaGLUnmapBufferObject(pbo));
// load texture from pbo
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBindTexture(GL_TEXTURE_2D, texid);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// display results
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glDisable(GL_DEPTH_TEST);
glBegin(GL_QUADS);
glTexCoord2f(0, 1);
glVertex2f(0, 0);
glTexCoord2f(1, 1);
glVertex2f(1, 0);
glTexCoord2f(1, 0);
glVertex2f(1, 1);
glTexCoord2f(0, 0);
glVertex2f(0, 1);
glEnd();
glDisable(GL_TEXTURE_2D);
glutSwapBuffers();
sdkStopTimer(&timer);
computeFPS();
}
void idle()
{
glutPostRedisplay();
}
void cleanup()
{
sdkDeleteTimer(&timer);
checkCudaErrors(cudaFree(d_img));
checkCudaErrors(cudaFree(d_temp));
if (!runBenchmark)
{
if (pbo)
{
checkCudaErrors(cudaGLUnregisterBufferObject(pbo));
glDeleteBuffers(1, &pbo);
}
if (texid)
{
glDeleteTextures(1, &texid);
}
}
}
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
case 27:
#if defined(__APPLE__) || defined(MACOSX)
exit(EXIT_SUCCESS);
#else
glutDestroyWindow(glutGetWindow());
return;
#endif
break;
case '=':
sigma+=0.1f;
break;
case '-':
sigma-=0.1f;
if (sigma < 0.0)
{
sigma = 0.0f;
}
break;
case '+':
sigma+=1.0f;
break;
case '_':
sigma-=1.0f;
if (sigma < 0.0)
{
sigma = 0.0f;
}
break;
case '0':
order = 0;
break;
case '1':
order = 1;
sigma = 0.5f;
break;
case '2':
order = 2;
sigma = 0.5f;
break;
default:
break;
}
printf("sigma = %f\n", sigma);
glutPostRedisplay();
}
void reshape(int x, int y)
{
glViewport(0, 0, x, y);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
}
void initCudaBuffers()
{
unsigned int size = width * height * sizeof(unsigned int);
// allocate device memory
checkCudaErrors(cudaMalloc((void **) &d_img, size));
checkCudaErrors(cudaMalloc((void **) &d_temp, size));
checkCudaErrors(cudaMemcpy(d_img, h_img, size, cudaMemcpyHostToDevice));
sdkCreateTimer(&timer);
}
void initGLBuffers()
{
// create pixel buffer object to store final image
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, width*height*sizeof(GLubyte)*4, h_img, GL_STREAM_DRAW_ARB);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
checkCudaErrors(cudaGLRegisterBufferObject(pbo));
// create texture for display
glGenTextures(1, &texid);
glBindTexture(GL_TEXTURE_2D, texid);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
}
void initGL(int *argc, char **argv)
{
glutInit(argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
glutInitWindowSize(width, height);
glutCreateWindow(sSDKsample);
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutReshapeFunc(reshape);
glutIdleFunc(idle);
#if defined (__APPLE__) || defined(MACOSX)
atexit(cleanup);
#else
glutCloseFunc(cleanup);
#endif
printf("Press '+' and '-' to change filter width\n");
printf("0, 1, 2 - change filter order\n");
if (!isGLVersionSupported(2,0) || !areGLExtensionsSupported("GL_ARB_vertex_buffer_object GL_ARB_pixel_buffer_object"))
{
fprintf(stderr, "Required OpenGL extensions missing.");
exit(EXIT_FAILURE);
}
}
void
benchmark(int iterations)
{
// allocate memory for result
unsigned int *d_result;
unsigned int size = width * height * sizeof(unsigned int);
checkCudaErrors(cudaMalloc((void **) &d_result, size));
thrust::block_2d<unsigned int> block_d_output (width,height,0);
thrust::block_2d<unsigned int> block_d_input (width,height,0);
// warm-up
printf("Dimension = %dx%d \n",width,height);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
checkCudaErrors(cudaDeviceSynchronize());
sdkStartTimer(&timer);
// execute the kernel
for (int i = 0; i < iterations; i++)
{
gaussianFilterRGBA(d_img, block_d_input,d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
}
checkCudaErrors(cudaMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&timer);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n", (width*height*iterations / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
checkCudaErrors(cudaFree(d_result));
}
bool
runSingleTest(const char *ref_file, const char *exec_path)
{
// allocate memory for result
int nTotalErrors = 0;
unsigned int *d_result;
unsigned int size = width * height * sizeof(unsigned int);
checkCudaErrors(cudaMalloc((void **) &d_result, size));
// warm-up
thrust::block_2d<unsigned int> block_d_output (width,height,0);
thrust::block_2d<unsigned int> block_d_input (width,height,0);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
checkCudaErrors(cudaMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaDeviceSynchronize());
sdkStartTimer(&timer);
gaussianFilterRGBA(d_img,block_d_input, d_result,block_d_output, d_temp, width, height, sigma, order, nthreads);
checkCudaErrors(cudaMemcpy(d_result,block_d_output.data_pointer,width*height*sizeof(unsigned int),cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("Kernel execution failed");
sdkStopTimer(&timer);
unsigned char *h_result = (unsigned char *)malloc(width*height*4);
checkCudaErrors(cudaMemcpy(h_result, d_result, width*height*4, cudaMemcpyDeviceToHost));
char dump_file[1024];
sprintf(dump_file, "lena_%02d.ppm", (int)sigma);
sdkSavePPM4ub(dump_file, h_result, width, height);
if (!sdkComparePPM(dump_file, sdkFindFilePath(ref_file, exec_path), MAX_EPSILON_ERROR, THRESHOLD, false))
{
nTotalErrors++;
}
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n", (width*height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
checkCudaErrors(cudaFree(d_result));
free(h_result);
printf("Summary: %d errors!\n", nTotalErrors);
printf(nTotalErrors == 0 ? "Test passed\n": "Test failed!\n");
return (nTotalErrors == 0);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
char *ref_file = NULL;
#if defined(__linux__)
setenv ("DISPLAY", ":0", 0);
#endif
printf("%s Starting...\n\n", sSDKsample);
printf("NOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n\n");
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **)argv, "file"))
{
getCmdLineArgumentString(argc, (const char **)argv, "file", &ref_file);
fpsLimit = frameCheckNumber;
}
}
// Get the path of the filename
char *filename;
if (getCmdLineArgumentString(argc, (const char **) argv, "image", &filename))
{
image_filename = filename;
}
// load image
char *image_path = sdkFindFilePath(image_filename, argv[0]);
if (image_path == NULL)
{
fprintf(stderr, "Error unable to find and load image file: '%s'\n", image_filename);
exit(EXIT_FAILURE);
}
sdkLoadPPM4ub(image_path, (unsigned char **)&h_img, &width, &height);
if (!h_img)
{
printf("Error unable to load PPM file: '%s'\n", image_path);
exit(EXIT_FAILURE);
}
printf("Loaded '%s', %d x %d pixels\n", image_path, width, height);
if (checkCmdLineFlag(argc, (const char **)argv, "threads"))
{
nthreads = getCmdLineArgumentInt(argc, (const char **) argv, "threads");
}
if (checkCmdLineFlag(argc, (const char **)argv, "sigma"))
{
sigma = getCmdLineArgumentFloat(argc, (const char **) argv, "sigma");
}
runBenchmark = checkCmdLineFlag(argc, (const char **) argv, "benchmark");
int device;
struct cudaDeviceProp prop;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
if (!strncmp("Tesla", prop.name, 5))
{
printf("Tesla card detected, running the test in benchmark mode (no OpenGL display)\n");
// runBenchmark = true;
runBenchmark = true;
}
// Benchmark or AutoTest mode detected, no OpenGL
if (runBenchmark == true || ref_file != NULL)
{
findCudaDevice(argc, (const char **)argv);
}
else
{
// First initialize OpenGL context, so we can properly set the GL for CUDA.
// This is necessary in order to achieve optimal performance with OpenGL/CUDA interop.
initGL(&argc, argv);
findCudaGLDevice(argc, (const char **)argv);
}
initCudaBuffers();
if (ref_file)
{
printf("(Automated Testing)\n");
bool testPassed = runSingleTest(ref_file, argv[0]);
cleanup();
exit(testPassed ? EXIT_SUCCESS : EXIT_FAILURE);
}
if (runBenchmark)
{
printf("(Run Benchmark)\n");
if(argc==3)
{
cleanup();
width = atoi(argv[2]);
height = atoi(argv[2]);
h_img = (unsigned int *)malloc(width*height*sizeof(unsigned int));
memset(h_img,213,sizeof(unsigned int)*width*height);
initCudaBuffers();
}
benchmark(100);
cleanup();
exit(EXIT_SUCCESS);
}
initGLBuffers();
glutMainLoop();
exit(EXIT_SUCCESS);
}
|
90339b0130ff53da9d5e31ee1e706cf37cb4406c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Solves for x assuming only the diagonal of `mat` matters at this stage:
//   x[n] = b[n] / mat[n][n]   (row-major, row length `cols`), one thread/row.
// `threads1D` is the block size -- presumably defined in includes.h; verify.
// NOTE(review): no guard against a zero diagonal entry (division by zero).
__global__ void solve(float* mat, float* b, float* x, int rows, int cols)
{
int n = blockIdx.x*threads1D + threadIdx.x;
if (n < rows) //Ensure bounds
x[n] = b[n] / mat[n * cols + n];
} | 90339b0130ff53da9d5e31ee1e706cf37cb4406c.cu | #include "includes.h"
// Solves for x assuming only the diagonal of `mat` matters at this stage:
//   x[n] = b[n] / mat[n][n]   (row-major, row length `cols`), one thread/row.
// `threads1D` is the block size -- presumably defined in includes.h; verify.
__global__ void solve(float* mat, float* b, float* x, int rows, int cols)
{
int n = blockIdx.x*threads1D + threadIdx.x;
if (n < rows) //Ensure bounds
x[n] = b[n] / mat[n * cols + n];
} |
87789486410148ea94c97dd64bf43ea915378347.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// All-pairs Euclidean distance: C[ix*nx2 + iy] = ||A_row(ix) - B_row(iy)||_2
// for row-major A (nx1 x dim) and B (nx2 x dim). 2D launch: thread.x indexes
// rows of A, thread.y indexes rows of B.
// NOTE(review): accumulates into C with `+=`, so C must be zero-initialized
// before launch -- confirm the caller memsets it.
__global__ void kernel_matrix_dist(float *A, float *B, float *C, const int nx1, const int nx2, const int dim) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy + nx2 * ix; // index in C
if((ix < nx1) && (iy < nx2)) {
// Sum of squared per-dimension differences between the two points.
for(int i = 0; i < dim; i++) {
C[idx] += (A[ix * dim + i] - B[iy * dim + i]) * (A[ix * dim + i] - B[iy * dim + i]);
}
// Square root of the accumulated sum gives the Euclidean distance.
C[idx] = sqrtf(C[idx]);
}
}
// Shared-memory variant of the all-pairs distance kernel. Each thread owns
// one scalar element of A (point numInA, dimension dimInA); B is streamed
// through shared memory one blockDim.x-sized tile at a time, and squared
// differences are accumulated into C via atomicAdd. Requires blockDim.x
// floats of dynamic shared memory. C must be zero-initialized by the caller.
// NOTE(review): there is no __syncthreads() at the end of each tile
// iteration, so fast threads can overwrite sharedPoints while slower ones
// are still reading it -- likely a data race; confirm with racecheck.
// NOTE(review): the final sqrtf pass only block-synchronizes, but C receives
// atomicAdds from *other* blocks too, so the sqrt can run before all
// contributions have landed -- cross-block race unless nx1*dim fits one block.
__global__ void kernel_matrix_dist_sharedMem(float *A, float *B, float *C, const int nx1, const int nx2, const int dim) {
extern __shared__ float sharedPoints[]; // length == blockSize (i.e. blockDim.x here)
int idx = threadIdx.x + blockIdx.x * blockDim.x; // index in A
int numInA = idx / dim;
int dimInA = idx % dim;
for(int currentBlock = 0; currentBlock < (nx2*dim/blockDim.x)+1; currentBlock++) {
// move a block of elements from B to shared memory in each iteration
if((threadIdx.x + currentBlock * blockDim.x) < (nx2 * dim)) {
sharedPoints[threadIdx.x] = B[threadIdx.x + currentBlock * blockDim.x];
}
__syncthreads(); // wait for finishing moving to shared memory
if(idx < (nx1 * dim)) {
// compute distance in corresponding dimension between this A_point to all buffered B_points in shared memory
for(int i = 0; i < blockDim.x; i++) {
int idxInB = i + currentBlock * blockDim.x;
if(idxInB >= (nx2 * dim)) break;
int numInB = idxInB / dim;
int dimInB = idxInB % dim;
if(dimInA == dimInB) {
int idxInC = numInB + nx2 * numInA;
// necessary to have atomic operation here otherwise random errors introduced
atomicAdd(&C[idxInC], (A[idx] - sharedPoints[i]) * (A[idx] - sharedPoints[i]));
}
}
}
}
__syncthreads(); // wait for finishing adding all dimensions for all points in C array
// thread with dimInA==0 do sqrtf() for the corresponding row in C
if(idx < (nx1 * dim) && dimInA == 0) {
for(int i = 0; i < nx2; i++) {
C[i + numInA * nx2] = sqrtf(C[i + numInA * nx2]);
}
}
} | 87789486410148ea94c97dd64bf43ea915378347.cu | #include <stdio.h>
__global__ void kernel_matrix_dist(float *A, float *B, float *C, const int nx1, const int nx2, const int dim) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy + nx2 * ix; // index in C
if((ix < nx1) && (iy < nx2)) {
for(int i = 0; i < dim; i++) {
C[idx] += (A[ix * dim + i] - B[iy * dim + i]) * (A[ix * dim + i] - B[iy * dim + i]);
}
C[idx] = sqrtf(C[idx]);
}
}
// Same distance matrix as kernel_matrix_dist, but B is staged through
// dynamic shared memory in tiles of blockDim.x floats (pass
// blockDim.x * sizeof(float) as the launch's shared-memory size).
// Threads map one-per-element of A (nx1 * dim threads total); partial
// squared distances are accumulated into C with atomicAdd, then the
// dimInA == 0 thread of each A point square-roots its row of C.
// FIX: added the end-of-iteration __syncthreads(); without it, fast
// threads could start overwriting sharedPoints with tile currentBlock+1
// while slower threads were still reading tile currentBlock (data race).
// NOTE(review): C must be zero-initialized by the caller, and the final
// sqrtf pass is only ordered by __syncthreads() within one block — confirm
// callers choose blockDim.x as a multiple of dim so an A point's
// contributions never span blocks.
__global__ void kernel_matrix_dist_sharedMem(float *A, float *B, float *C, const int nx1, const int nx2, const int dim) {
	extern __shared__ float sharedPoints[]; // length == blockSize (i.e. blockDim.x here)
	int idx = threadIdx.x + blockIdx.x * blockDim.x; // index in A
	int numInA = idx / dim; // which A point this thread belongs to
	int dimInA = idx % dim; // which coordinate of that point
	for(int currentBlock = 0; currentBlock < (nx2*dim/blockDim.x)+1; currentBlock++) {
		// move a block of elements from B to shared memory in each iteration
		if((threadIdx.x + currentBlock * blockDim.x) < (nx2 * dim)) {
			sharedPoints[threadIdx.x] = B[threadIdx.x + currentBlock * blockDim.x];
		}
		__syncthreads(); // wait for finishing moving to shared memory
		if(idx < (nx1 * dim)) {
			// compute distance in corresponding dimension between this A_point to all buffered B_points in shared memory
			for(int i = 0; i < blockDim.x; i++) {
				int idxInB = i + currentBlock * blockDim.x;
				if(idxInB >= (nx2 * dim)) break;
				int numInB = idxInB / dim;
				int dimInB = idxInB % dim;
				if(dimInA == dimInB) {
					int idxInC = numInB + nx2 * numInA;
					// necessary to have atomic operation here otherwise random errors introduced
					atomicAdd(&C[idxInC], (A[idx] - sharedPoints[i]) * (A[idx] - sharedPoints[i]));
				}
			}
		}
		// FIX: barrier before the next iteration reloads sharedPoints; also
		// serves as the "all adds done" barrier after the final iteration.
		__syncthreads();
	}
	// thread with dimInA==0 does sqrtf() for the corresponding row in C
	if(idx < (nx1 * dim) && dimInA == 0) {
		for(int i = 0; i < nx2; i++) {
			C[i + numInA * nx2] = sqrtf(C[i + numInA * nx2]);
		}
	}
}
9e6bf32a5948630482dfc2dc139a6fce25c689af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// HEADERS
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
// DEFINITIONS
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define BLOCK_X 16
#define BLOCK_Y 16
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
#define DT 3.3333e-07f
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
// FUNCTIONS DECLARATION
void Ultrasonic_Tomography(const string&, int, float, int, float, float, float);
// Euclidean (Frobenius) norm of the nx-by-ny host field A.
float norm(host_ptr<float> A, int nx, int ny)
{
	float acc = 0.f;
	for (int row = 0; row < ny; ++row) {
		for (int col = 0; col < nx; ++col) {
			acc += A(col, row) * A(col, row);
		}
	}
	return sqrtf(acc);
}
// Fills ii/jj with the (x, y) grid coordinates of `num` transducers laid
// out around the square sensor ring: 160 per side for num == 640, walking
// the jj==181 row, then the ii==181 column, then the jj==21 row, then the
// ii==21 column.
// FIX: allocate with new[] instead of malloc(); the caller releases these
// arrays with `delete [] ii` / `delete [] jj`, and pairing malloc() with
// delete[] is undefined behavior.
void Position_Transducers(int *&ii, int *&jj, int num)
{
	ii = new int[num];
	jj = new int[num];
	int p = 0;
	// first side: y fixed at 181
	for(p = 0; p < 160; p++)
	{
		ii[p] = 21 + (p + 1);
		jj[p] = 181;
	}
	// second side: x fixed at 181
	for(p = 160; p < 320; p++)
	{
		ii[p] = 181;
		jj[p] = 181 - ((p + 1) - 160);
	}
	// third side: y fixed at 21
	for(p = 320; p < 480; p++)
	{
		ii[p] = 181 - ((p + 1) - 320);
		jj[p] = 21;
	}
	// fourth side: x fixed at 21
	for(p = 480; p < num; p++)
	{
		ii[p] = 21;
		jj[p] = 21 + ((p + 1) - 480);
	}
}
// Advance the wave field u by one time step: writes slice k+1 from slices
// k and k-1 with a second-order finite-difference stencil. f is the sound
// speed contrast field (local speed v = 1500 * sqrt(1 + f)).
// ip1..ip2 / jp1..jp2 delimit the active transducer group, which injects a
// Gaussian-modulated cosine source pulse while k + 1 <= 24.
// Expected launch: 2D grid with one thread per (i, j) pixel of NX x NY.
__global__ void propagation(
int jp1, int jp2, int ip1, int ip2,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i < NX && j < NY) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX; // Courant ratio v*dt/dx
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k) +
u(i-1, j, k) +
u(i, j-1, k) +
u(i, j+1, k)) +
s * u(i, j, k) -
u(i, j, k-1);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary: one-sided update using inward neighbours
// NOTE(review): appears to be an absorbing-boundary scheme — confirm
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k) +
2.f * r * (1.f + r) * u(ia, ja, k) -
r * r * u(ib, jb, k) +
(2.f * r - 1.f) * u(i, j, k-1) -
2.f * r * u(ia, ja, k-1);
}
u(i, j, k+1) = val;
/* if (k+1 == NT - 1) */
/* printf("%e \t", val); */
}
}
// One thread per time step t: set the four corner samples of u to the
// mean of their two adjacent edge samples.
__global__ void propagation_at_corners(kernel_ptr<float> u)
{
	int t = threadIdx.x + blockIdx.x * blockDim.x;
	if (t >= NT)
		return;
	u(0, 0, t) = 1.f / 2.f * (u(0, 1, t) + u(1, 0, t));
	u(0, NY-1, t) = 1.f / 2.f * (u(0, NY-2, t) + u(1, NY-1, t));
	u(NX-1, 0, t) = 1.f / 2.f * (u(NX-2, 0, t) + u(NX-1, 1, t));
	u(NX-1, NY-1, t) = 1.f / 2.f * (u(NX-2, NY-1, t) + u(NX-1, NY-2, t));
}
// Residual between the recorded signals g_* of sensor group p and the
// simulated field u, sampled on the four sensor lines (rows 20/180 and
// columns 20/180), for sensor index i at time step k.
__global__ void difference_signal(
kernel_ptr<float> const u,
kernel_ptr<float> const g_bottom,
kernel_ptr<float> const g_right,
kernel_ptr<float> const g_top,
kernel_ptr<float> const g_left,
kernel_ptr<float> rr_bottom,
kernel_ptr<float> rr_right,
kernel_ptr<float> rr_top,
kernel_ptr<float> rr_left,
int p)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int k = threadIdx.y + blockIdx.y * blockDim.y;
	bool sensor_ok = (i > 20) && (i < 180);
	bool time_ok = (k > 1) && (k < NT);
	if (!(sensor_ok && time_ok))
		return;
	// horizontal sensor rows
	rr_top(i, k) = g_top(i, k, p) - u(i, 20, k);
	rr_bottom(i, k) = g_bottom(i, k, p) - u(i, 180, k);
	// vertical sensor columns
	rr_left(i, k) = g_left(i, k, p) - u(20, i, k);
	rr_right(i, k) = g_right(i, k, p) - u(180, i, k);
}
// One backward-in-time step of the adjoint field z: writes slice k from
// slices k+1 and k+2 on interior pixels (1..NX-2, 1..NY-2), using the
// (1 + f)-weighted 5-point Laplacian. Boundary/sensor values of slice k
// are filled afterwards by backpropagation2.
// Expected launch: 2D grid with one thread per (i, j) pixel.
__global__ void backpropagation1(
kernel_ptr<float> z,
kernel_ptr<float> const f,
int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1))
{
z(i, j, k) =
1500.f * 1500.f * (DT * DT) *
((1.f + f(i, j-1)) * z(i, j-1, k+1) +
(1.f + f(i, j+1)) * z(i, j+1, k+1) +
(1.f + f(i-1, j)) * z(i-1, j, k+1) +
(1.f + f(i+1, j)) * z(i+1, j, k+1) -
4.f * (1.f + f(i, j)) *
z(i, j, k+1)) / (H * H) +
2.f * z(i, j, k+1) -
z(i, j, k+2);
/* if (k == 1) */
/* printf("%e \t", z(i, j, k)); */
}
}
// Backward step, part 2: inject the residuals rr_* into the adjoint field
// z along the four sensor lines, replicate edge values outward, and fill
// the corners. Launched with one thread per index i (<<<NX, 1>>>); each
// thread serves both a row and a column position (valid since NX == NY).
__global__ void backpropagation2(
kernel_ptr<float> z,
kernel_ptr<float> const rr_bottom,
kernel_ptr<float> const rr_right,
kernel_ptr<float> const rr_top,
kernel_ptr<float> const rr_left,
int k)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
// sensor lines: neighbour value plus scaled residual
if(i >= 21 && i < 180) {
z(i, 180, k) =
z(i, 179, k) +
rr_bottom(i, k) * H * 1000.f;
z(i, 20, k) =
z(i, 21, k) +
rr_top(i, k) * H * 1000.f;
z(180, i, k) =
z(179, i, k) +
rr_right(i, k) * H * 1000.f;
z(20, i, k) =
z(21, i, k) +
rr_left(i, k) * H * 1000.f;
}
// domain edges: copy the adjacent interior value outward
if (i >= 1 && i < (NX - 1)) {
z(i, 0, k) =
z(i, 1, k);
z(i, NY-1, k) =
z(i, NY-2, k);
z(0, i, k) =
z(1, i, k);
z(NX-1, i, k) =
z(NX-2, i, k);
}
// a single thread (i == 0) patches all four corners; i == NX-1 does
// nothing in this part
else if (i == 0) {
z(0, 0, k) =
(z(1, 0, k) +
z(0, 1, k)) / 2.f;
z(NX-1, 0, k) =
(z(NX-2, 0, k) +
z(NX-1, 1, k)) / 2.f;
z(0, NY-1, k) =
(z(1, NY-1, k) +
z(0, NY-2, k)) / 2.f;
z(NX-1, NY-1, k) =
(z(NX-2, NY-1, k) +
z(NX-1, NY-2, k)) / 2.f;
}
}
// 5-point discrete Laplacian of u at time slice k+1, with neighbour
// indices clamped (replicated) at the domain edges.
// Expected launch: 3D grid covering NX x NY x NT.
__global__ void laplace(
kernel_ptr<float> const u,
kernel_ptr<float> Lu)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k = threadIdx.z + blockIdx.z * blockDim.z;
	int kk = k + 1; // Laplacian is taken on slice k+1
	if (i >= NX || j >= NY || kk >= NT)
		return;
	// clamp neighbour indices to the grid
	int jm = (j > 0) ? j - 1 : j;
	int jp = (j < NY - 1) ? j + 1 : j;
	int im = (i > 0) ? i - 1 : i;
	int ip = (i < NX - 1) ? i + 1 : i;
	Lu(i, j, kk) =
		(u(i, jm, kk) +
		 u(i, jp, kk) +
		 u(im, j, kk) +
		 u(ip, j, kk) -
		 4.f * u(i, j, kk)) / (H * H);
}
// One thread per time step: fill the four corner cells of Lu (slice k+1)
// with the mean of their two edge neighbours. (u is unused; kept for
// signature compatibility with existing call sites.)
__global__ void laplace_corners(kernel_ptr<float> const u, kernel_ptr<float> Lu)
{
	int k = threadIdx.x + blockIdx.x * blockDim.x;
	int kk = k + 1;
	if (kk >= NT)
		return;
	Lu(0, 0, kk) = (Lu(1, 0, kk) + Lu(0, 1, kk)) / 2.f;
	Lu(NX-1, 0, kk) = (Lu(NX-2, 0, kk) + Lu(NX-1, 1, kk)) / 2.f;
	Lu(0, NY-1, kk) = (Lu(1, NY-1, kk) + Lu(0, NY-2, kk)) / 2.f;
	Lu(NX-1, NY-1, kk) = (Lu(NX-2, NY-1, kk) + Lu(NX-1, NY-2, kk)) / 2.f;
}
// Accumulate the gradient contribution z * Lu / (1 + f) at time slice k+1
// into the per-group differential df(:, :, g) and the running sum df_avg.
// Atomics are required: every time slice adds into the same (i, j) cell.
__global__ void update_differential(
kernel_ptr<float> df,
kernel_ptr<float> df_avg,
kernel_ptr<float> const z,
kernel_ptr<float> const Lu,
kernel_ptr<float> const f,
int g)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k = threadIdx.z + blockIdx.z * blockDim.z;
	if (i >= NX || j >= NY || (k + 1) >= NT)
		return;
	float contrib =
		z(i, j, k+1) *
		Lu(i, j, k+1) /
		(1.f + f(i, j));
	atomicAdd(&df_avg(i, j), contrib);
	atomicAdd(&df(i, j, g), contrib);
}
// One thread per sensor group g: weights(g) becomes the L2 distance
// between that group's differential and the group mean (df_avg / Ng);
// weights(Ng) atomically accumulates the sum of reciprocals used later
// for normalization.
__global__ void weights_differential(
kernel_ptr<float> const df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> weights,
int Ng)
{
	int g = threadIdx.x + blockIdx.x * blockDim.x;
	if (g >= Ng)
		return;
	float sum = 0.f;
	for (int j = 0; j < NY; ++j) {
		for (int i = 0; i < NX; ++i) {
			float dev = df(i, j, g) - df_avg(i, j) / Ng;
			sum += dev * dev;
		}
	}
	weights(g) = sqrtf(sum);
	atomicAdd(
		&weights(Ng),
		1.f / weights(g));
}
// Weighted mean of the per-group differentials: each group g contributes
// df(:, :, g) scaled by its normalized inverse weight
// (1/weights(g)) / weights(Ng). df_avg must be zeroed before launch.
__global__ void average_differential(
kernel_ptr<float> df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> const weights,
int Ng)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int g = threadIdx.z + blockIdx.z * blockDim.z;
	if (i >= NX || j >= NY || g >= Ng)
		return;
	float w = (1.f / weights(g)) / weights(Ng);
	atomicAdd(
		&df_avg(i, j),
		df(i, j, g) * w);
}
// Gradient step on the reconstructed field f (inside the sensor ring
// only), then refresh of the error field f - fo. Ng is unused here but
// kept for signature compatibility with existing call sites.
__global__ void update_field(
kernel_ptr<float> f,
kernel_ptr<float> const df_avg,
kernel_ptr<float> f_minus_fo,
kernel_ptr<float> const fo,
float scale,
int Ng)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	if (i >= NX || j >= NY)
		return;
	bool inside_ring = (i > 20) && (i < 180) && (j > 20) && (j < 180);
	if (inside_ring)
		f(i, j) += scale * (df_avg(i, j));
	f_minus_fo(i, j) = f(i, j) - fo(i, j);
}
// MAIN PROGRAM
// Entry point: validate the six CLI arguments, then run the tomography
// reconstruction and reset the device.
int main(int argc, char **argv)
{
	if (argc != 7) {
		cerr << "Usage: " << argv[0] << " <fo_filename> <sensor group size> <target epsilon> <max iterations> <omega> <alpha>\n\n";
		exit(1);
	}
	string fo_filename = argv[1];
	// exactly one '.' => simple name with one extension, in the cwd
	int dots = count(fo_filename.begin(), fo_filename.end(), '.');
	if (dots != 1) {
		cerr << "Error: '" << fo_filename << "' should have only one period.\n"
		     << " It should be in the current directory "
		     << "and have only one filetype extension.\n\n";
		exit(1);
	}
	int group_size = stoi(argv[2]);
	float target_epsilon = stof(argv[3]);
	int max_iterations = stoi(argv[4]);
	float omega = stof(argv[5]);
	float alpha = stof(argv[6]);
	float beta = 1.f - alpha;
	// -1 means "no iteration cap"
	if (max_iterations == -1)
		max_iterations = numeric_limits<int>::max();
	cerr << setprecision(9);
	cout << setprecision(9);
	Ultrasonic_Tomography(fo_filename, group_size, target_epsilon, max_iterations, omega, alpha, beta);
	hipDeviceReset();
}
// Smallest number of blocks of size `threads` needed to cover n elements.
inline int grid_size(int n, int threads)
{
	float blocks = ceil(float(n) / threads);
	return (int) blocks;
}
// FUNCTIONS DEFINITION
// Runs the full ART/Kaczmarz ultrasonic tomography reconstruction:
// - loads the ground-truth field fo and the recorded sensor signals
//   ("<basename>-data-<side>-<group_size>.txt") from the current dir,
// - iterates: forward wave propagation per sensor group, residuals vs.
//   the recorded signals, adjoint backpropagation, then a weighted
//   gradient update of the estimated field f,
// - stops on target_epsilon, divergence/NaN, or max_iterations,
// - writes convergence/time traces and the final field to
//   "art-<group_size>-<basename>.txt".
// All kernels run on the default stream, so dependent launches execute in
// order without explicit synchronization between them.
void Ultrasonic_Tomography(const string &fo_filename, int group_size, float target_epsilon, int max_iterations, float omega, float alpha, float beta)
{
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// gg_xxx(i, k, g) =
// initial signal at pos i in row/column xxx
// at time k, from sensor group
// e.g g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
auto idx = fo_filename.find('.');
string basename = fo_filename.substr(0, idx);
// load ground truth + the four recorded sensor-line data files
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n";
return;
}
string prefix = basename + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ifstream gb_in(gb_name);
ifstream gr_in(gr_name);
ifstream gt_in(gt_name);
ifstream gl_in(gl_name);
if (!gb_in) {
cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n";
return;
}
if (!gr_in) {
cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n";
return;
}
if (!gt_in) {
cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n";
return;
}
if (!gl_in) {
cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
read(gb_in, g_bottom);
copy(dev_g_bottom, g_bottom);
read(gr_in, g_right);
copy(dev_g_right, g_right);
read(gt_in, g_top);
copy(dev_g_top, g_top);
read(gl_in, g_left);
copy(dev_g_left, g_left);
}
// Simulation Variables
dim3 Block_Size(BLOCK_X, BLOCK_Y);
dim3 Grid_Size(grid_size(NX, BLOCK_X), grid_size(NY, BLOCK_Y));
device_ptr<float> dev_u(NX, NY, NT);
// Environment Initialization
// Position of the transducers
int *ii, *jj;
Position_Transducers(ii, jj, NS);
// Kaczmarz method
// propagation
// residual buffers; NX is used for all four (fine since NX == NY)
device_ptr<float> dev_rr_bottom(NX, NT);
device_ptr<float> dev_rr_right(NX, NT);
device_ptr<float> dev_rr_top(NX, NT);
device_ptr<float> dev_rr_left(NX, NT);
dev_rr_bottom.set(0.f);
dev_rr_right.set(0.f);
dev_rr_top.set(0.f);
dev_rr_left.set(0.f);
// adjoint field has NT+1 slices: backpropagation1 reads k+2
device_ptr<float> dev_z(NX, NY, NT+1);
device_ptr<float> dev_Lu(NX, NY, NT);
dev_Lu.set(0.f);
device_ptr<float> dev_f(NX, NY);
dev_f.set(0.f);
device_ptr<float> dev_df(NX, NY, Ng);
device_ptr<float> dev_df_avg(NX, NY);
device_ptr<float> weights(Ng+1);
weights.set(0.f);
device_ptr<float> dev_f_minus_fo(NX, NY);
host_ptr<float> f(NX, NY);
host_ptr<float> f_minus_fo(NX, NY);
// initialize epsilon values
float prev_epsilon = 100.f;
float curr_epsilon = -std::numeric_limits<float>::infinity();
float file_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "writing convergence to 'art_convergence.txt'...\n" */
/* << "writing time to 'art_time.txt'...\n"; */
ofstream convergence_file("art_convergence.txt");
ofstream time_file("art_time.txt");
dim3 threads_propagation(NX, 1, 1);
dim3 grid_propagation(
grid_size(NX, threads_propagation.x),
grid_size(NY, threads_propagation.y));
dim3 threads_diff_signal(NX, 1);
dim3 grid_diff_signal(
grid_size(NX, threads_diff_signal.x),
grid_size(NT, threads_diff_signal.y));
dim3 threads_backpropagation1(NX, 1, 1);
dim3 grid_backpropagation1(
grid_size(NX, threads_backpropagation1.x),
grid_size(NY, threads_backpropagation1.y))
| 9e6bf32a5948630482dfc2dc139a6fce25c689af.cu | // HEADERS
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
// DEFINITIONS
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define BLOCK_X 16
#define BLOCK_Y 16
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
#define DT 3.3333e-07f
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
// FUNCTIONS DECLARATION
void Ultrasonic_Tomography(const string&, int, float, int, float, float, float);
float norm(host_ptr<float> A, int nx, int ny)
{
float sum = 0;
for (int j = 0; j < ny; ++j)
for (int i = 0; i < nx; ++i)
sum += A(i, j) * A(i, j);
return sqrtf(sum);
}
void Position_Transducers(int *&ii, int *&jj, int num)
{
//returns the (x,y) coordinates of the number of total transducers
int p = 0;
ii = (int*)malloc(num * sizeof(int));
jj = (int*)malloc(num * sizeof(int));
for(p = 0; p < 160; p++)
{
ii[p] = 21 + (p + 1);
jj[p] = 181;
}
for(p = 160; p < 320; p++)
{
ii[p] = 181;
jj[p] = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii[p] = 181 - ((p + 1) - 320);
jj[p] = 21;
}
for(p = 480; p < num; p++)
{
ii[p] = 21;
jj[p] = 21 + ((p + 1) - 480);
}
}
__global__ void propagation(
int jp1, int jp2, int ip1, int ip2,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i < NX && j < NY) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k) +
u(i-1, j, k) +
u(i, j-1, k) +
u(i, j+1, k)) +
s * u(i, j, k) -
u(i, j, k-1);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k) +
2.f * r * (1.f + r) * u(ia, ja, k) -
r * r * u(ib, jb, k) +
(2.f * r - 1.f) * u(i, j, k-1) -
2.f * r * u(ia, ja, k-1);
}
u(i, j, k+1) = val;
/* if (k+1 == NT - 1) */
/* printf("%e \t", val); */
}
}
__global__ void propagation_at_corners(kernel_ptr<float> u)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
if (k < NT) {
u(0, 0, k) =
1.f / 2.f * (u(0, 1, k) + u(1, 0, k));
u(NX-1, 0, k) =
1.f / 2.f * (u(NX-2, 0, k) + u(NX-1, 1, k));
u(0, NY-1, k) =
1.f / 2.f * (u(0, NY-2, k) + u(1, NY-1, k));
u(NX-1, NY-1, k) =
1.f / 2.f * (u(NX-2, NY-1, k) + u(NX-1, NY-2, k));
}
}
__global__ void difference_signal(
kernel_ptr<float> const u,
kernel_ptr<float> const g_bottom,
kernel_ptr<float> const g_right,
kernel_ptr<float> const g_top,
kernel_ptr<float> const g_left,
kernel_ptr<float> rr_bottom,
kernel_ptr<float> rr_right,
kernel_ptr<float> rr_top,
kernel_ptr<float> rr_left,
int p)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
if (i > 20 && i < 180 && k > 1 && k < NT) {
// store difference at time k of original signal
// and current signal at bottom sensor row
rr_bottom(i, k) =
g_bottom(i, k, p) -
u(i, 180, k);
/* printf("%e ", rr_bottom(i+21, k+2)); */
// store difference at time k of original signal
// and current signal at top sensor row
rr_top(i, k) =
g_top(i, k, p) -
u(i, 20, k);
// store difference at time k of original signal
// and current signal at right sensor column
rr_right(i, k) =
g_right(i, k, p) -
u(180, i, k);
// store difference at time k of original signal
// and current signal at left sensor column
rr_left(i, k) =
g_left(i, k, p) -
u(20, i, k);
}
}
__global__ void backpropagation1(
kernel_ptr<float> z,
kernel_ptr<float> const f,
int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1))
{
z(i, j, k) =
1500.f * 1500.f * (DT * DT) *
((1.f + f(i, j-1)) * z(i, j-1, k+1) +
(1.f + f(i, j+1)) * z(i, j+1, k+1) +
(1.f + f(i-1, j)) * z(i-1, j, k+1) +
(1.f + f(i+1, j)) * z(i+1, j, k+1) -
4.f * (1.f + f(i, j)) *
z(i, j, k+1)) / (H * H) +
2.f * z(i, j, k+1) -
z(i, j, k+2);
/* if (k == 1) */
/* printf("%e \t", z(i, j, k)); */
}
}
__global__ void backpropagation2(
kernel_ptr<float> z,
kernel_ptr<float> const rr_bottom,
kernel_ptr<float> const rr_right,
kernel_ptr<float> const rr_top,
kernel_ptr<float> const rr_left,
int k)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= 21 && i < 180) {
z(i, 180, k) =
z(i, 179, k) +
rr_bottom(i, k) * H * 1000.f;
z(i, 20, k) =
z(i, 21, k) +
rr_top(i, k) * H * 1000.f;
z(180, i, k) =
z(179, i, k) +
rr_right(i, k) * H * 1000.f;
z(20, i, k) =
z(21, i, k) +
rr_left(i, k) * H * 1000.f;
}
if (i >= 1 && i < (NX - 1)) {
z(i, 0, k) =
z(i, 1, k);
z(i, NY-1, k) =
z(i, NY-2, k);
z(0, i, k) =
z(1, i, k);
z(NX-1, i, k) =
z(NX-2, i, k);
}
else if (i == 0) {
z(0, 0, k) =
(z(1, 0, k) +
z(0, 1, k)) / 2.f;
z(NX-1, 0, k) =
(z(NX-2, 0, k) +
z(NX-1, 1, k)) / 2.f;
z(0, NY-1, k) =
(z(1, NY-1, k) +
z(0, NY-2, k)) / 2.f;
z(NX-1, NY-1, k) =
(z(NX-2, NY-1, k) +
z(NX-1, NY-2, k)) / 2.f;
}
}
__global__ void laplace(
kernel_ptr<float> const u,
kernel_ptr<float> Lu)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && (k + 1) < NT) {
int j_prev = (j > 0) ? j - 1 : j;
int j_next = (j < NY - 1) ? j + 1 : j;
int i_prev = (i > 0) ? i - 1 : i;
int i_next = (i < NX - 1) ? i + 1 : i;
Lu(i, j, k+1) =
(u(i, j_prev, k+1) +
u(i, j_next, k+1) +
u(i_prev, j, k+1) +
u(i_next, j, k+1) -
4.f * u(i, j, k+1)) / (H * H);
}
}
__global__ void laplace_corners(kernel_ptr<float> const u, kernel_ptr<float> Lu)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
if ((k + 1) < NT) {
Lu(0, 0, k+1) =
(Lu(1, 0, k+1) +
Lu(0, 1, k+1)) / 2.f;
Lu(NX-1, 0, k+1) =
(Lu(NX-2, 0, k+1) +
Lu(NX-1, 1, k+1)) / 2.f;
Lu(0, NY-1, k+1) =
(Lu(1, NY-1, k+1) +
Lu(0, NY-2, k+1)) / 2.f;
Lu(NX-1, NY-1, k+1) =
(Lu(NX-2, NY-1, k+1) +
Lu(NX-1, NY-2, k+1)) / 2.f;
}
}
__global__ void update_differential(
kernel_ptr<float> df,
kernel_ptr<float> df_avg,
kernel_ptr<float> const z,
kernel_ptr<float> const Lu,
kernel_ptr<float> const f,
int g)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if(i < NX && j < NY && (k + 1) < NT) {
float val =
z(i, j, k+1) *
Lu(i, j, k+1) /
(1.f + f(i, j));
atomicAdd(
&df(i, j, g), val);
atomicAdd(
&df_avg(i, j), val);
}
}
__global__ void weights_differential(
kernel_ptr<float> const df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> weights,
int Ng)
{
int g = threadIdx.x + blockIdx.x * blockDim.x;
if (g < Ng) {
float sum = 0.f;
for (int j = 0; j < NY; ++j)
for (int i = 0; i < NX; ++i) {
float val = df(i, j, g) - df_avg(i, j) / Ng;
sum += val * val;
}
weights(g) = sqrtf(sum);
atomicAdd(
&weights(Ng),
1.f / weights(g));
}
}
__global__ void average_differential(
kernel_ptr<float> df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> const weights,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float weight = (1.f / weights(g)) / weights(Ng);
/* if (i == 0 && j == 0) */
/* printf("%.9g\n", weight); */
atomicAdd(
&df_avg(i, j),
df(i, j, g) * weight);
}
}
__global__ void update_field(
kernel_ptr<float> f,
kernel_ptr<float> const df_avg,
kernel_ptr<float> f_minus_fo,
kernel_ptr<float> const fo,
float scale,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < NX && j < NY)
{
bool in_sensor_field = (i > 20) && (i < 180) && (j > 20) && (j < 180);
if (in_sensor_field)
f(i, j) += scale * (df_avg(i, j));
f_minus_fo(i, j) = f(i, j) - fo(i, j);
}
}
// MAIN PROGRAM
int main(int argc, char **argv)
{
//Command Line Argument Processing
if (argc != 7) {
cerr << "Usage: " << argv[0] << " <fo_filename> <sensor group size> <target epsilon> <max iterations> <omega> <alpha>\n\n";
exit(1);
}
string fo_filename = argv[1];
if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) {
cerr << "Error: '" << fo_filename << "' should have only one period.\n"
<< " It should be in the current directory "
<< "and have only one filetype extension.\n\n";
exit(1);
}
int group_size = stoi(argv[2]);
float target_epsilon = stof(argv[3]);
int max_iterations = stoi(argv[4]);
float omega = stof(argv[5]);
float alpha = stof(argv[6]);
float beta = 1.f - alpha;
if (max_iterations == -1)
max_iterations = numeric_limits<int>::max();
cout << setprecision(9);
cerr << setprecision(9);
Ultrasonic_Tomography(fo_filename, group_size, target_epsilon, max_iterations, omega, alpha, beta);
cudaDeviceReset();
}
inline int grid_size(int n, int threads)
{
return ceil(float(n) / threads);
}
// FUNCTIONS DEFINITION
void Ultrasonic_Tomography(const string &fo_filename, int group_size, float target_epsilon, int max_iterations, float omega, float alpha, float beta)
{
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// gg_xxx(i, k, g) =
// initial signal at pos i in row/column xxx
// at time k, from sensor group
// e.g g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
auto idx = fo_filename.find('.');
string basename = fo_filename.substr(0, idx);
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n";
return;
}
string prefix = basename + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ifstream gb_in(gb_name);
ifstream gr_in(gr_name);
ifstream gt_in(gt_name);
ifstream gl_in(gl_name);
if (!gb_in) {
cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n";
return;
}
if (!gr_in) {
cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n";
return;
}
if (!gt_in) {
cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n";
return;
}
if (!gl_in) {
cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
read(gb_in, g_bottom);
copy(dev_g_bottom, g_bottom);
read(gr_in, g_right);
copy(dev_g_right, g_right);
read(gt_in, g_top);
copy(dev_g_top, g_top);
read(gl_in, g_left);
copy(dev_g_left, g_left);
}
// Simulation Variables
dim3 Block_Size(BLOCK_X, BLOCK_Y);
dim3 Grid_Size(grid_size(NX, BLOCK_X), grid_size(NY, BLOCK_Y));
device_ptr<float> dev_u(NX, NY, NT);
// Environment Initialization
// Position of the transducers
int *ii, *jj;
Position_Transducers(ii, jj, NS);
// Kaczmarz method
// propagation
device_ptr<float> dev_rr_bottom(NX, NT);
device_ptr<float> dev_rr_right(NX, NT);
device_ptr<float> dev_rr_top(NX, NT);
device_ptr<float> dev_rr_left(NX, NT);
dev_rr_bottom.set(0.f);
dev_rr_right.set(0.f);
dev_rr_top.set(0.f);
dev_rr_left.set(0.f);
device_ptr<float> dev_z(NX, NY, NT+1);
device_ptr<float> dev_Lu(NX, NY, NT);
dev_Lu.set(0.f);
device_ptr<float> dev_f(NX, NY);
dev_f.set(0.f);
device_ptr<float> dev_df(NX, NY, Ng);
device_ptr<float> dev_df_avg(NX, NY);
device_ptr<float> weights(Ng+1);
weights.set(0.f);
device_ptr<float> dev_f_minus_fo(NX, NY);
host_ptr<float> f(NX, NY);
host_ptr<float> f_minus_fo(NX, NY);
// initialize epsilon values
float prev_epsilon = 100.f;
float curr_epsilon = -std::numeric_limits<float>::infinity();
float file_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "writing convergence to 'art_convergence.txt'...\n" */
/* << "writing time to 'art_time.txt'...\n"; */
ofstream convergence_file("art_convergence.txt");
ofstream time_file("art_time.txt");
dim3 threads_propagation(NX, 1, 1);
dim3 grid_propagation(
grid_size(NX, threads_propagation.x),
grid_size(NY, threads_propagation.y));
dim3 threads_diff_signal(NX, 1);
dim3 grid_diff_signal(
grid_size(NX, threads_diff_signal.x),
grid_size(NT, threads_diff_signal.y));
dim3 threads_backpropagation1(NX, 1, 1);
dim3 grid_backpropagation1(
grid_size(NX, threads_backpropagation1.x),
grid_size(NY, threads_backpropagation1.y));
dim3 threads_laplace(96, 2, 1);
dim3 grid_laplace(
grid_size(NX, threads_laplace.x),
grid_size(NY, threads_laplace.y),
grid_size(NT, threads_laplace.z));
dim3 threads_differential(96, 2, 1);
dim3 grid_differential(
grid_size(NX, threads_differential.x),
grid_size(NY, threads_differential.y),
grid_size(NT, threads_differential.z));
dim3 threads_avg_diff(NX, 1, 1);
dim3 grid_avg_diff(
grid_size(NX, threads_avg_diff.x),
grid_size(NY, threads_avg_diff.y),
grid_size(Ng, threads_avg_diff.z));
cerr << "group size: " << group_size << "\n"
<< "target epsilon: " << target_epsilon << "\n"
<< "omega: " << omega << "\n"
<< "alpha: " << alpha << "\n"
<< "beta: " << beta << "\n\n";
cout << "iter\tepsilon\t\tdifference\tscale\n"
<< "----\t-------\t\t----------\t-----\n";
cudaDeviceSynchronize();
int ti = clock();
for(int iter = 0; iter < max_iterations; iter++)
{
cout << iter << "\t";
dev_u.set(0.f);
dev_df.set(0.f);
dev_df_avg.set(0.f);
for(int p = 0; p < NS; p += group_size)
{
int g = p / group_size;
int jp1 = jj[p];
int jp2 = jj[p + group_size - 1];
int ip1 = ii[p];
int ip2 = ii[p + group_size - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
for (int k = 1; k < NT - 1; k++)
{
propagation<<<grid_propagation, threads_propagation>>>(jp1, jp2, ip1, ip2, dev_f, dev_u, k);
}
// Four corners
propagation_at_corners<<<NT, 1>>>(dev_u);
difference_signal<<<grid_diff_signal, threads_diff_signal>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, g);
dev_z.set(0.f);
for (int k = NT - 2; k > 0; k--)
{
backpropagation1<<<grid_backpropagation1, threads_backpropagation1>>>(dev_z, dev_f, k);
backpropagation2<<<NX, 1>>>(dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k);
}
laplace<<<grid_laplace, threads_laplace>>>(dev_u, dev_Lu);
laplace_corners<<<NT, 1>>>(dev_u, dev_Lu);
update_differential<<<grid_differential, threads_differential>>>(dev_df, dev_df_avg, dev_z, dev_Lu, dev_f, g);
}
weights.set(0.f);
weights_differential<<<grid_size(Ng, Ng), Ng>>>(dev_df_avg, dev_df, weights, Ng);
dev_df_avg.set(0.f);
average_differential<<<grid_avg_diff, threads_avg_diff>>>(dev_df_avg, dev_df, weights, Ng);
float scale = omega * (alpha + beta / pow(iter+1, 1.5f));
update_field<<<Grid_Size, Block_Size>>>(dev_f, dev_df_avg, dev_f_minus_fo, dev_fo, scale, Ng);
copy(f_minus_fo, dev_f_minus_fo);
curr_epsilon = norm(f_minus_fo, NX, NY) / norm(fo, NX, NY) * 100.f;
float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC;
if (file_epsilon - curr_epsilon > 0.2f) {
convergence_file << curr_epsilon << " ";
time_file << current_t << " ";
file_epsilon = curr_epsilon;
}
cout << curr_epsilon << "\t" << curr_epsilon - prev_epsilon << "\t"
<< setw(10) << scale << " = " << omega * alpha << " + " << omega * (beta / (iter+1)) << "\n";
// stop if reached target epsilon
if (curr_epsilon <= target_epsilon) {
cerr << "reached target epsilon = " << target_epsilon << ", at iter = " << iter << ", epsilon = " << curr_epsilon << "\n\n";
break;
}
// stop if epsilon diverges
if (curr_epsilon > prev_epsilon ||
std::isnan(curr_epsilon)) {
cerr << "diverged at iter = " << iter << ", epsilon = " << curr_epsilon << "\n\n";
break;
}
// update prev_epsilon
prev_epsilon = curr_epsilon;
}
cudaDeviceSynchronize();
int tf = clock();
cout << endl;
// copy from device to host
copy(f, dev_f);
string f_name = "art-" + to_string(group_size) + "-" + basename + ".txt";
/* cerr << "writing to '" << f_name << "'...\n\n"; */
ofstream f_out(f_name);
write(f_out, f);
// Free Variables
delete [] ii;
delete [] jj;
cerr << "time (s): " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n";
}
|
917d3251dc4c2bb4a286981ca289ba93dfb2d1df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _VELOCITY_KERNEL_H_
#define _VELOCITY_KERNEL_H_
// Per-pixel optical-flow velocity solve.
// Inputs are images of gradient products (xx = GxGx, yy = GyGy, tt = GtGt,
// xy = GxGy, xt = GxGt, yt = GyGt), row-major with row stride `pitch`;
// vx/vy outputs use the same layout. One thread per pixel, tiled
// VELOCITY_TILE_WIDTH x VELOCITY_TILE_HEIGHT per block.
__global__ void
velocity_kernel(float *vx, float* vy, float *xx, float *yy, float *tt, float *xy, float *xt, float *yt, int img_width, int img_height, int pitch) {
int idx = MUL(blockIdx.x, VELOCITY_TILE_WIDTH) + threadIdx.x;
int idy = MUL(blockIdx.y, VELOCITY_TILE_HEIGHT) + threadIdx.y;
int index = MUL(idy,pitch) + idx;
if(idx < img_width && idy < img_height) {
//shared memory is not used in this kernel, but none of these values will be accessed by other threads
float tmp1x2 = xx[index] * yy[index]; //GxGx.*GyGy
float tmp1x5 = xx[index] * yt[index]; //GxGx.*GyGt
float tmp3x5 = xy[index] * yt[index]; //GxGy.*GyGt
float tmp3x4 = xy[index] * xt[index]; //GxGy.*GxGt
float tmp2x4 = yy[index] * xt[index]; //GyGy.*GxGt
float tmp3x3 = xy[index] * xy[index]; //GxGy.*GxGy
// Cramer's-rule solve of the 2x2 system [xx xy; xy yy] * v = [xt; yt].
float Vx_dividend = tmp2x4 - tmp3x5; //GyGy.*GxGt - GxGy.*GyGt
float Vy_dividend = tmp1x5 - tmp3x4; //GxGx.*GyGt - GxGy.*GxGt
float divisor = tmp1x2 - tmp3x3; //GxGx.*GyGy - GxGy.*GxGy
float tempVx, tempVy;
if( divisor == 0 ) { // NOTE(review): exact float compare; near-zero dets are handled by the < 0.5f branch below
tempVx = 0;
tempVy = 0;
} else {
tempVx = DIV(Vx_dividend, divisor);
tempVy = DIV(Vy_dividend, divisor);
}
// Ill-conditioned system (small determinant): regularize by adding k to the
// diagonal terms and re-solve with the same Cramer's-rule shape.
if( divisor < 0.5f ) {
//k must be calculated, velocity recalculated - Roger recalculates velocity using the velocity calculated in the previous pixel
//(either to the left or up one pixel). I am doing it using the velocity originally calculated for that pixel.
// NOTE(review): the 28.0f divisor follows the reference implementation; its origin is not documented here.
float k = (tt[index] - 2*xt[index]*tempVx - 2*yt[index]*tempVy + xx[index]*tempVx*tempVx + 2*xy[index]*tempVx*tempVy + yy[index]*tempVy*tempVy)/28.0f;
float gxx_n = xx[index] + k;
float gyy_n = yy[index] + k;
tmp1x2 = gxx_n * gyy_n; //GxGx.*GyGy
tmp1x5 = gxx_n * yt[index]; //GxGx.*GyGz
tmp2x4 = gyy_n * xt[index]; //GyGy.*GxGz
Vx_dividend = tmp2x4 - tmp3x5; //GyGy.*GxGt - GxGy.*GyGt
Vy_dividend = tmp1x5 - tmp3x4; //GxGx.*GyGz - GxGy.*GxGz
divisor = tmp1x2 - tmp3x3; //GxGx.*GyGy - GxGy.*GxGy
if( divisor == 0 ) {
tempVx = 0;
tempVy = 0;
} else {
tempVx = DIV(Vx_dividend, divisor);
tempVy = DIV(Vy_dividend, divisor);
}
}
#if( MATCH_ROGER )
{
// Clamp to [-8, 7] to match the reference ("Roger") output range.
if( tempVx > 7 ) tempVx = 7;
if( tempVx < -8 ) tempVx = -8;
if( tempVy > 7 ) tempVy = 7;
if( tempVy < -8 ) tempVy = -8;
}
#endif
vx[index] = tempVx;
vy[index] = tempVy;
}
}
#endif // #ifndef _VELOCITY_KERNEL_H_
| 917d3251dc4c2bb4a286981ca289ba93dfb2d1df.cu |
#ifndef _VELOCITY_KERNEL_H_
#define _VELOCITY_KERNEL_H_
// Per-pixel optical-flow velocity solve from gradient-product images
// (xx = GxGx, yy = GyGy, tt = GtGt, xy = GxGy, xt = GxGt, yt = GyGt),
// row-major with row stride `pitch`. One thread per pixel; the block tile is
// VELOCITY_TILE_WIDTH x VELOCITY_TILE_HEIGHT. Writes the solved (vx, vy).
__global__ void
velocity_kernel(float *vx, float* vy, float *xx, float *yy, float *tt, float *xy, float *xt, float *yt, int img_width, int img_height, int pitch) {
const int col = MUL(blockIdx.x, VELOCITY_TILE_WIDTH) + threadIdx.x;
const int row = MUL(blockIdx.y, VELOCITY_TILE_HEIGHT) + threadIdx.y;
const int pix = MUL(row, pitch) + col;
if (col >= img_width || row >= img_height)
return;
// All values below live in registers; no cross-thread sharing is needed.
const float gxy = xy[pix];
// Cramer's-rule solve of [xx xy; xy yy] * v = [xt; yt].
float numVx = yy[pix] * xt[pix] - gxy * yt[pix]; // GyGy.*GxGt - GxGy.*GyGt
float numVy = xx[pix] * yt[pix] - gxy * xt[pix]; // GxGx.*GyGt - GxGy.*GxGt
float det = xx[pix] * yy[pix] - gxy * gxy; // determinant
float velX, velY;
if (det == 0) {
velX = 0;
velY = 0;
} else {
velX = DIV(numVx, det);
velY = DIV(numVy, det);
}
if (det < 0.5f) {
// Ill-conditioned system: compute a regularization term k, add it to the
// diagonal, and re-solve. (Reference code recalculated using a neighboring
// pixel's velocity; here the pixel's own first-pass velocity is used.)
float k = (tt[pix] - 2*xt[pix]*velX - 2*yt[pix]*velY + xx[pix]*velX*velX + 2*xy[pix]*velX*velY + yy[pix]*velY*velY)/28.0f;
float gxxReg = xx[pix] + k;
float gyyReg = yy[pix] + k;
numVx = gyyReg * xt[pix] - gxy * yt[pix];
numVy = gxxReg * yt[pix] - gxy * xt[pix];
det = gxxReg * gyyReg - gxy * gxy;
if (det == 0) {
velX = 0;
velY = 0;
} else {
velX = DIV(numVx, det);
velY = DIV(numVy, det);
}
}
#if( MATCH_ROGER )
{
// Clamp into [-8, 7] to match the reference output range.
if (velX > 7) velX = 7;
if (velX < -8) velX = -8;
if (velY > 7) velY = 7;
if (velY < -8) velY = -8;
}
#endif
vx[pix] = velX;
vy[pix] = velY;
}
#endif // #ifndef _VELOCITY_KERNEL_H_
|
38753fdea84818cafab93fc57d8d85ef3c22af86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Copies psi into weightx and phi into weighty, one element per thread.
// In this variant absIx, absIy, norm_for_smooth_term and eps are unused
// (the signature presumably matches sibling weight kernels — TODO confirm).
__global__ void Compute_weightx_weighty1_norm2_Kernel(float* weightx, float* weighty, const float* psi, const float* phi, const float* absIx, const float* absIy, int nPixels, float norm_for_smooth_term, float eps)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
// Flat 1-D global index over the pixel arrays.
int x = bx*blockDim.x + tx;
if (x >= nPixels)
return;
weightx[x] = psi[x];
weighty[x] = phi[x];
} | 38753fdea84818cafab93fc57d8d85ef3c22af86.cu | #include "includes.h"
// Forwards psi into weightx and phi into weighty, one element per thread.
// absIx, absIy, norm_for_smooth_term and eps are unused by this variant.
__global__ void Compute_weightx_weighty1_norm2_Kernel(float* weightx, float* weighty, const float* psi, const float* phi, const float* absIx, const float* absIy, int nPixels, float norm_for_smooth_term, float eps)
{
// Flat 1-D global index for this thread.
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < nPixels)
{
weightx[gid] = psi[gid];
weighty[gid] = phi[gid];
}
}
3bfb462acc544e5a9e1f98e97281f3dd73ba3510.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
// Prints "hi", then enumerates HIP devices and prints each device's name.
int main(){
std::cout << "hi" << std::endl;
int devicesCount;
// NOTE(review): the hipError_t result is unchecked; if the call fails,
// devicesCount is read uninitialized — consider checking the return code.
hipGetDeviceCount(&devicesCount);
for(int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
{
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, deviceIndex);
// printf("Device name: %s", deviceProperties.name);
std::cout << deviceProperties.name << std::endl;
}
return 0;
} | 3bfb462acc544e5a9e1f98e97281f3dd73ba3510.cu | #include <iostream>
// Prints "hi", then enumerates CUDA devices and prints each device's name.
// Returns 0 on success, 1 if a CUDA runtime call fails.
int main(){
std::cout << "hi" << std::endl;
// BUGFIX: API results were unchecked; on failure devicesCount (previously
// uninitialized) and deviceProperties would have been read as garbage.
int devicesCount = 0;
cudaError_t err = cudaGetDeviceCount(&devicesCount);
if (err != cudaSuccess) {
std::cerr << "cudaGetDeviceCount failed: " << cudaGetErrorString(err) << std::endl;
return 1;
}
for(int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
{
cudaDeviceProp deviceProperties;
err = cudaGetDeviceProperties(&deviceProperties, deviceIndex);
if (err != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties failed: " << cudaGetErrorString(err) << std::endl;
return 1;
}
std::cout << deviceProperties.name << std::endl;
}
return 0;
} |
8888587da55831a9d238b13cf73d436dc10e8b41.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "const_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks const_kernel over every (matrix size, block shape) pair:
// 1 warm-up + 10 untimed + 1000 timed launches per configuration, printing
// "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per line.
int main(int argc, char **argv) {
hipSetDevice(0);
// BUGFIX: guard argv[1] before strtol — it previously dereferenced NULL
// when the matrix count argument was missing.
if (argc < 2) {
cerr << "usage: " << argv[0] << " <matrix_count>" << endl;
return 1;
}
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float ALPHA = 2;
float *X = NULL;
// BUGFIX: allocate N floats, not N bytes — the kernel is handed N elements.
hipMalloc(&X, XSIZE*YSIZE*sizeof(float));
int INCX = 1;
// Round the launch extents up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
const_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,ALPHA,X,INCX);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
const_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,ALPHA,X,INCX);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
const_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,ALPHA,X,INCX);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUGFIX: release the per-configuration buffer (previously leaked on
// every one of the 20 * matrix_len iterations).
hipFree(X);
}
}} | 8888587da55831a9d238b13cf73d436dc10e8b41.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "const_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks const_kernel over every (matrix size, block shape) pair:
// 1 warm-up + 10 untimed + 1000 timed launches per configuration, printing
// "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per line.
int main(int argc, char **argv) {
cudaSetDevice(0);
// BUGFIX: guard argv[1] before strtol — it previously dereferenced NULL
// when the matrix count argument was missing.
if (argc < 2) {
cerr << "usage: " << argv[0] << " <matrix_count>" << endl;
return 1;
}
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float ALPHA = 2;
float *X = NULL;
// BUGFIX: allocate N floats, not N bytes — the kernel is handed N elements.
cudaMalloc(&X, XSIZE*YSIZE*sizeof(float));
int INCX = 1;
// Round the launch extents up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
const_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,INCX);
cudaDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
const_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,INCX);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
const_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,INCX);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUGFIX: release the per-configuration buffer (previously leaked on
// every one of the 20 * matrix_len iterations).
cudaFree(X);
}
}} |
7d18c24e8f24b2d7536f21a42039bc508d111c82.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 64
// Sets one element of `a` per thread to 1 (launched as a single block of N
// threads, so thread i writes a[i]).
__global__ void init(int *a) {
a[threadIdx.x] = 1;
}
// Warp-vote experiment: evaluates a per-lane predicate with the legacy
// mask-less __ballot and has lane 0 of each warp print the ballot bitmask
// in hex. Since init() sets a[i] = 1, the `a[threadIdx.x] == 0` clause is
// false and only the `threadIdx.x % 3 != 0` lanes contribute bits.
// NOTE(review): mask-less __ballot is removed on compute capability 7.0+;
// __ballot_sync(0xffffffff, ...) is the modern form — confirm target arch.
__global__ void K(int *a) {
// this forces other threads to return false. Ideally, other threads should be don't care.
//unsigned mask = __ballot(threadIdx.x % 3 == 0 && a[threadIdx.x] == 0);
unsigned mask = __ballot(threadIdx.x % 3 == 0 && a[threadIdx.x] == 0 || threadIdx.x % 3 != 0);
if (threadIdx.x % 32 == 0) {
printf("%X\n", mask);
}
}
// Allocates N ints, initializes them to 1 via init(), runs the ballot
// experiment kernel K, and waits for completion.
int main() {
int *a;
hipMalloc(&a, N * sizeof(int));
hipLaunchKernelGGL(( init), dim3(1), dim3(N), 0, 0, a);
hipLaunchKernelGGL(( K), dim3(1), dim3(N), 0, 0, a);
hipDeviceSynchronize();
// BUGFIX: release the device buffer (it was previously leaked).
hipFree(a);
return 0;
}
| 7d18c24e8f24b2d7536f21a42039bc508d111c82.cu | #include <stdio.h>
#include <cuda.h>
#define N 64
// Each thread stamps its own slot of `a` with 1 (single-block launch of N
// threads, so thread i owns a[i]).
__global__ void init(int *a) {
const int slot = threadIdx.x;
a[slot] = 1;
}
// Warp-vote experiment: evaluates a per-lane predicate with the legacy
// mask-less __ballot and has lane 0 of each warp print the ballot bitmask
// in hex. Since init() sets a[i] = 1, the `a[threadIdx.x] == 0` clause is
// false and only the `threadIdx.x % 3 != 0` lanes contribute bits.
// NOTE(review): mask-less __ballot is removed on compute capability 7.0+;
// __ballot_sync(0xffffffff, ...) is the modern form — confirm target arch.
__global__ void K(int *a) {
// this forces other threads to return false. Ideally, other threads should be don't care.
//unsigned mask = __ballot(threadIdx.x % 3 == 0 && a[threadIdx.x] == 0);
unsigned mask = __ballot(threadIdx.x % 3 == 0 && a[threadIdx.x] == 0 || threadIdx.x % 3 != 0);
if (threadIdx.x % 32 == 0) {
printf("%X\n", mask);
}
}
// Allocates N ints, initializes them to 1 via init(), runs the ballot
// experiment kernel K, and waits for completion.
int main() {
int *a;
cudaMalloc(&a, N * sizeof(int));
init<<<1, N>>>(a);
K<<<1, N>>>(a);
cudaDeviceSynchronize();
// BUGFIX: release the device buffer (it was previously leaked).
cudaFree(a);
return 0;
}
|
afdad6c9e2b92ce61252c2595d9c1a9da1a14a90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPUFILTERS
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
/////////////////////////////////////////// Corner Harris /////////////////////////////////////////////////
texture<float, hipTextureType2D, hipReadModeElementType> harrisDxTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<float, hipTextureType2D, hipReadModeElementType> harrisDyTex(0, hipFilterModePoint, hipAddressModeClamp);
// Harris corner response, no explicit border functors: out-of-range reads go
// through harrisDxTex/harrisDyTex, which are declared with clamp addressing.
// One thread per output pixel; a/b/c accumulate the structure-tensor sums
// (dx*dx, dx*dy, dy*dy) over a block_size x block_size window centred on
// (x, y); the result is det(M) - k * trace(M)^2.
__global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float a = 0.f;
float b = 0.f;
float c = 0.f;
const int ibegin = y - (block_size / 2);
const int jbegin = x - (block_size / 2);
const int iend = ibegin + block_size;
const int jend = jbegin + block_size;
for (int i = ibegin; i < iend; ++i)
{
for (int j = jbegin; j < jend; ++j)
{
float dx = tex2D(harrisDxTex, j, i);
float dy = tex2D(harrisDyTex, j, i);
a += dx * dx;
b += dx * dy;
c += dy * dy;
}
}
// Harris score: det([[a,b],[b,c]]) - k * (a + c)^2.
dst(y, x) = a * c - b * b - k * (a + c) * (a + c);
}
}
// Harris corner response with explicit border handling: border_row remaps
// column indices (idx_col) and border_col remaps row indices (idx_row)
// before the texture fetch.
// Note: the inner `const int y` / `const int x` deliberately shadow the
// pixel coordinates inside the window loops; the final dst(y, x) write is
// outside those scopes and uses the original pixel coordinates.
template <typename BR, typename BC>
__global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst, const BR border_row, const BC border_col)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float a = 0.f;
float b = 0.f;
float c = 0.f;
const int ibegin = y - (block_size / 2);
const int jbegin = x - (block_size / 2);
const int iend = ibegin + block_size;
const int jend = jbegin + block_size;
for (int i = ibegin; i < iend; ++i)
{
const int y = border_col.idx_row(i);
for (int j = jbegin; j < jend; ++j)
{
const int x = border_row.idx_col(j);
float dx = tex2D(harrisDxTex, x, y);
float dy = tex2D(harrisDyTex, x, y);
a += dx * dx;
b += dx * dy;
c += dy * dy;
}
}
// Harris score: det([[a,b],[b,c]]) - k * (a + c)^2.
dst(y, x) = a * c - b * b - k * (a + c) * (a + c);
}
}
// Host launcher for the Harris kernels: binds the Dx/Dy gradient images to
// textures and dispatches on border_type. BORDER_REPLICATE uses the plain
// kernel, relying on the textures' clamp addressing; unsupported border
// types launch nothing. Synchronizes only on the default (null) stream.
void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y));
bindTexture(&harrisDxTex, Dx);
bindTexture(&harrisDyTex, Dy);
switch (border_type)
{
case BORDER_REFLECT101:
hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
break;
case BORDER_REFLECT:
hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
break;
case BORDER_REPLICATE:
hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst);
break;
}
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
/////////////////////////////////////////// Corner Min Eigen Val /////////////////////////////////////////////////
texture<float, hipTextureType2D, hipReadModeElementType> minEigenValDxTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<float, hipTextureType2D, hipReadModeElementType> minEigenValDyTex(0, hipFilterModePoint, hipAddressModeClamp);
// Minimal-eigenvalue corner response (Shi-Tomasi style), no explicit border
// functors: out-of-range reads rely on the textures' clamp addressing.
// a/b/c accumulate (dx*dx, dx*dy, dy*dy) over a block_size x block_size
// window; with a and c halved, (a + c) - sqrt((a - c)^2 + b^2) equals the
// smaller eigenvalue of the structure tensor [[2a, b], [b, 2c]] / 2.
__global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float a = 0.f;
float b = 0.f;
float c = 0.f;
const int ibegin = y - (block_size / 2);
const int jbegin = x - (block_size / 2);
const int iend = ibegin + block_size;
const int jend = jbegin + block_size;
for (int i = ibegin; i < iend; ++i)
{
for (int j = jbegin; j < jend; ++j)
{
float dx = tex2D(minEigenValDxTex, j, i);
float dy = tex2D(minEigenValDyTex, j, i);
a += dx * dx;
b += dx * dy;
c += dy * dy;
}
}
a *= 0.5f;
c *= 0.5f;
dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
}
}
// Minimal-eigenvalue corner response with explicit border handling:
// border_row remaps column indices (idx_col) and border_col remaps row
// indices (idx_row) before the texture fetch.
// Note: the inner `int y` / `int x` deliberately shadow the pixel
// coordinates inside the window loops; the final dst(y, x) write uses the
// original pixel coordinates.
template <typename BR, typename BC>
__global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst, const BR border_row, const BC border_col)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float a = 0.f;
float b = 0.f;
float c = 0.f;
const int ibegin = y - (block_size / 2);
const int jbegin = x - (block_size / 2);
const int iend = ibegin + block_size;
const int jend = jbegin + block_size;
for (int i = ibegin; i < iend; ++i)
{
int y = border_col.idx_row(i);
for (int j = jbegin; j < jend; ++j)
{
int x = border_row.idx_col(j);
float dx = tex2D(minEigenValDxTex, x, y);
float dy = tex2D(minEigenValDyTex, x, y);
a += dx * dx;
b += dx * dy;
c += dy * dy;
}
}
a *= 0.5f;
c *= 0.5f;
// Smaller eigenvalue of the (halved) structure tensor.
dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
}
}
// Host launcher for the min-eigenvalue kernels: binds the Dx/Dy gradient
// images to textures and dispatches on border_type. BORDER_REPLICATE uses
// the plain kernel, relying on the textures' clamp addressing; unsupported
// border types launch nothing. Synchronizes only on the null stream.
void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y));
bindTexture(&minEigenValDxTex, Dx);
bindTexture(&minEigenValDyTex, Dy);
switch (border_type)
{
case BORDER_REFLECT101:
hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
break;
case BORDER_REFLECT:
hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
break;
case BORDER_REPLICATE:
hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst);
break;
}
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
}
}}}
#endif // HAVE_OPENCV_GPUFILTERS
#endif // CUDA_DISABLER
| afdad6c9e2b92ce61252c2595d9c1a9da1a14a90.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPUFILTERS
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
/////////////////////////////////////////// Corner Harris /////////////////////////////////////////////////
texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDxTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDyTex(0, cudaFilterModePoint, cudaAddressModeClamp);
// Harris corner response, no explicit border functors: out-of-range reads
// go through harrisDxTex/harrisDyTex, which use clamp addressing.
// One thread per output pixel.
__global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
// Accumulate the structure-tensor sums (dx*dx, dx*dy, dy*dy) over a
// block_size x block_size window centred on (x, y).
float sxx = 0.f;
float sxy = 0.f;
float syy = 0.f;
const int i0 = y - (block_size / 2);
const int j0 = x - (block_size / 2);
for (int i = i0; i < i0 + block_size; ++i)
{
for (int j = j0; j < j0 + block_size; ++j)
{
const float dx = tex2D(harrisDxTex, j, i);
const float dy = tex2D(harrisDyTex, j, i);
sxx += dx * dx;
sxy += dx * dy;
syy += dy * dy;
}
}
// Harris score: det(M) - k * trace(M)^2 for M = [[sxx, sxy], [sxy, syy]].
dst(y, x) = sxx * syy - sxy * sxy - k * (sxx + syy) * (sxx + syy);
}
// Harris corner response with explicit border handling: border_row remaps
// column indices (idx_col) and border_col remaps row indices (idx_row)
// before the texture fetch.
// Note: the inner `const int y` / `const int x` deliberately shadow the
// pixel coordinates inside the window loops; the final dst(y, x) write is
// outside those scopes and uses the original pixel coordinates.
template <typename BR, typename BC>
__global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst, const BR border_row, const BC border_col)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float a = 0.f;
float b = 0.f;
float c = 0.f;
const int ibegin = y - (block_size / 2);
const int jbegin = x - (block_size / 2);
const int iend = ibegin + block_size;
const int jend = jbegin + block_size;
for (int i = ibegin; i < iend; ++i)
{
const int y = border_col.idx_row(i);
for (int j = jbegin; j < jend; ++j)
{
const int x = border_row.idx_col(j);
float dx = tex2D(harrisDxTex, x, y);
float dy = tex2D(harrisDyTex, x, y);
a += dx * dx;
b += dx * dy;
c += dy * dy;
}
}
// Harris score: det([[a,b],[b,c]]) - k * (a + c)^2.
dst(y, x) = a * c - b * b - k * (a + c) * (a + c);
}
}
// Host launcher for the Harris kernels: binds the Dx/Dy gradient images to
// textures and dispatches on border_type. BORDER_REPLICATE uses the plain
// kernel, which reads through the clamp-addressed textures; unsupported
// border types launch nothing. Synchronizes only on the null stream.
void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream)
{
// 32x8 threads per block, one thread per destination pixel.
const dim3 threads(32, 8);
const dim3 blocks(divUp(Dx.cols, threads.x), divUp(Dx.rows, threads.y));
bindTexture(&harrisDxTex, Dx);
bindTexture(&harrisDyTex, Dy);
switch (border_type)
{
case BORDER_REFLECT101:
cornerHarris_kernel<<<blocks, threads, 0, stream>>>(block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
break;
case BORDER_REFLECT:
cornerHarris_kernel<<<blocks, threads, 0, stream>>>(block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
break;
case BORDER_REPLICATE:
cornerHarris_kernel<<<blocks, threads, 0, stream>>>(block_size, k, dst);
break;
}
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
/////////////////////////////////////////// Corner Min Eigen Val /////////////////////////////////////////////////
texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDxTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDyTex(0, cudaFilterModePoint, cudaAddressModeClamp);
// Minimal-eigenvalue corner response (Shi-Tomasi style), no explicit border
// functors: out-of-range reads rely on the textures' clamp addressing.
// a/b/c accumulate (dx*dx, dx*dy, dy*dy) over a block_size x block_size
// window; with a and c halved, (a + c) - sqrt((a - c)^2 + b^2) equals the
// smaller eigenvalue of the structure tensor [[2a, b], [b, 2c]] / 2.
__global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float a = 0.f;
float b = 0.f;
float c = 0.f;
const int ibegin = y - (block_size / 2);
const int jbegin = x - (block_size / 2);
const int iend = ibegin + block_size;
const int jend = jbegin + block_size;
for (int i = ibegin; i < iend; ++i)
{
for (int j = jbegin; j < jend; ++j)
{
float dx = tex2D(minEigenValDxTex, j, i);
float dy = tex2D(minEigenValDyTex, j, i);
a += dx * dx;
b += dx * dy;
c += dy * dy;
}
}
a *= 0.5f;
c *= 0.5f;
dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
}
}
// Minimal-eigenvalue corner response with explicit border handling:
// border_row remaps column indices (idx_col) and border_col remaps row
// indices (idx_row) before the texture fetch.
// Note: the inner `int y` / `int x` deliberately shadow the pixel
// coordinates inside the window loops; the final dst(y, x) write uses the
// original pixel coordinates.
template <typename BR, typename BC>
__global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst, const BR border_row, const BC border_col)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
float a = 0.f;
float b = 0.f;
float c = 0.f;
const int ibegin = y - (block_size / 2);
const int jbegin = x - (block_size / 2);
const int iend = ibegin + block_size;
const int jend = jbegin + block_size;
for (int i = ibegin; i < iend; ++i)
{
int y = border_col.idx_row(i);
for (int j = jbegin; j < jend; ++j)
{
int x = border_row.idx_col(j);
float dx = tex2D(minEigenValDxTex, x, y);
float dy = tex2D(minEigenValDyTex, x, y);
a += dx * dx;
b += dx * dy;
c += dy * dy;
}
}
a *= 0.5f;
c *= 0.5f;
// Smaller eigenvalue of the (halved) structure tensor.
dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
}
}
// Host launcher for the min-eigenvalue kernels: binds the Dx/Dy gradient
// images to textures and dispatches on border_type. BORDER_REPLICATE uses
// the plain kernel, relying on the textures' clamp addressing; unsupported
// border types launch nothing. Synchronizes only on the null stream.
void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y));
bindTexture(&minEigenValDxTex, Dx);
bindTexture(&minEigenValDyTex, Dy);
switch (border_type)
{
case BORDER_REFLECT101:
cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
break;
case BORDER_REFLECT:
cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
break;
case BORDER_REPLICATE:
cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst);
break;
}
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
}
}}}
#endif // HAVE_OPENCV_GPUFILTERS
#endif // CUDA_DISABLER
|
b7348d16c43bd88aa45b8c932d24f0991b434792.hip | // !!! This is a file automatically generated by hipify!!!
// ------------------------------------------------------------------
// Deformable Convolutional Networks
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN
// (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include "spconv_utils/nms.cuh"
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
// Evaluates a runtime-API call and prints the error string on failure.
// The do/while(0) wrapper makes the macro statement-safe after `if`.
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
// Integer ceiling division: number of n-sized chunks needed to cover m.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// One thread per bit of a 64-bit suppression-mask word (= 64 threads).
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Intersection-over-union of two axis-aligned boxes laid out as
// [x1, y1, x2, y2, ...]. Coordinates are treated as inclusive pixel
// indices, hence the "+ 1" when turning corner differences into extents.
template <typename DType>
__device__ inline DType devIoU(DType const *const a, DType const *const b) {
  // Corners of the intersection rectangle.
  DType left = max(a[0], b[0]), right = min(a[2], b[2]);
  DType top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  // Clamp with DType(0) instead of the float literal 0.f so the double
  // instantiation (see the explicit instantiations below) does not mix
  // float and double operands inside max().
  DType width = max(right - left + 1, DType(0));
  DType height = max(bottom - top + 1, DType(0));
  DType interS = width * height;
  DType Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  DType Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}
// Pairwise-overlap pass of NMS. The grid is 2-D: block (col_start, row_start)
// compares a tile of up to BLOCK_THREADS "row" boxes against a tile of up to
// BLOCK_THREADS "column" boxes. Thread i owns row box i and writes one
// 64-bit word whose set bits mark column-tile boxes overlapping it above
// nms_overlap_thresh. Boxes are stored 5 values apart in dev_boxes (only the
// first four, the corners, are read by devIoU); dev_mask has
// DIVUP(n_boxes, BLOCK_THREADS) words per box.
template <typename DType, int BLOCK_THREADS>
__global__ void nms_kernel(const int n_boxes, const DType nms_overlap_thresh,
const DType *dev_boxes,
unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
// Tail tiles cover fewer than BLOCK_THREADS boxes.
const int row_size = min(n_boxes - row_start * BLOCK_THREADS, BLOCK_THREADS);
const int col_size = min(n_boxes - col_start * BLOCK_THREADS, BLOCK_THREADS);
// Stage the column tile in shared memory; every row thread scans all of it.
__shared__ DType block_boxes[BLOCK_THREADS * 5];
if (threadIdx.x < col_size) {
#pragma unroll
for (int i = 0; i < 5; ++i) {
block_boxes[threadIdx.x * 5 + i] =
dev_boxes[(BLOCK_THREADS * col_start + threadIdx.x) * 5 + i];
}
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = BLOCK_THREADS * row_start + threadIdx.x;
const DType *cur_box = dev_boxes + cur_box_idx * 5;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
// Diagonal tile: only compare against later boxes so suppression is
// one-directional (and a box never suppresses itself).
start = threadIdx.x + 1;
}
for (int i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, BLOCK_THREADS);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Make `device_id` the current HIP device for the calling host thread,
// skipping the (context-initializing) hipSetDevice call when it is already
// current. Fixes a mojibake defect: "&current_device" had been corrupted
// into an HTML currency entity, so the address-of operator was lost.
void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(hipGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // hipSetDevice must come before any other calls that may perform
  // initialization using the GPU.
  CUDA_CHECK(hipSetDevice(device_id));
}
// Host driver for GPU non-maximum suppression. Uploads `boxes_num` boxes of
// `boxes_dim` values each, computes pairwise-overlap bitmasks on the device,
// then greedily scans boxes in input order on the host (callers presumably
// pass them score-sorted - confirm), keeping each box not suppressed by an
// earlier kept one. Kept indices go to keep_out; returns their count.
template <typename DType, int BLOCK_THREADS>
int _nms_gpu(int *keep_out, const DType *boxes_host, int boxes_num,
int boxes_dim, DType nms_overlap_thresh, int device_id) {
_set_device(device_id);
DType *boxes_dev = NULL;
unsigned long long *mask_dev = NULL;
// Number of 64-bit mask words needed per box.
const int col_blocks = DIVUP(boxes_num, BLOCK_THREADS);
CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(DType)));
CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host,
boxes_num * boxes_dim * sizeof(DType),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
// 2-D grid: one block per (row tile, column tile) pair of boxes.
dim3 blocks(DIVUP(boxes_num, BLOCK_THREADS), DIVUP(boxes_num, BLOCK_THREADS));
dim3 threads(BLOCK_THREADS);
hipLaunchKernelGGL(( nms_kernel<DType, BLOCK_THREADS>)
, dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
// The synchronous copy also serves as the barrier for the kernel above.
CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
// remv accumulates, one bit per box, everything suppressed so far.
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / BLOCK_THREADS;
int inblock = i % BLOCK_THREADS;
if (!(remv[nblock] & (1ULL << inblock))) {
// Box i survives; fold in the boxes it suppresses (later tiles only).
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
return num_to_keep;
}
// template<>
template int _nms_gpu<float, threadsPerBlock>(int *keep_out,
const float *boxes_host,
int boxes_num, int boxes_dim,
float nms_overlap_thresh,
int device_id);
// template<>
template int _nms_gpu<double, threadsPerBlock>(int *keep_out,
const double *boxes_host,
int boxes_num, int boxes_dim,
double nms_overlap_thresh,
int device_id); | b7348d16c43bd88aa45b8c932d24f0991b434792.cu | // ------------------------------------------------------------------
// Deformable Convolutional Networks
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN
// (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include "spconv_utils/nms.cuh"
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Intersection-over-union of two axis-aligned boxes laid out as
// [x1, y1, x2, y2, ...]. Coordinates are treated as inclusive pixel
// indices, hence the "+ 1" when turning corner differences into extents.
template <typename DType>
__device__ inline DType devIoU(DType const *const a, DType const *const b) {
  // Corners of the intersection rectangle.
  DType left = max(a[0], b[0]), right = min(a[2], b[2]);
  DType top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  // Clamp with DType(0) instead of the float literal 0.f so the double
  // instantiation (see the explicit instantiations below) does not mix
  // float and double operands inside max().
  DType width = max(right - left + 1, DType(0));
  DType height = max(bottom - top + 1, DType(0));
  DType interS = width * height;
  DType Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  DType Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}
// Pairwise-overlap pass of NMS. The grid is 2-D: block (col_start, row_start)
// compares a tile of up to BLOCK_THREADS "row" boxes against a tile of up to
// BLOCK_THREADS "column" boxes. Thread i owns row box i and writes one
// 64-bit word whose set bits mark column-tile boxes overlapping it above
// nms_overlap_thresh. Boxes are stored 5 values apart in dev_boxes (only the
// first four, the corners, are read by devIoU); dev_mask has
// DIVUP(n_boxes, BLOCK_THREADS) words per box.
template <typename DType, int BLOCK_THREADS>
__global__ void nms_kernel(const int n_boxes, const DType nms_overlap_thresh,
const DType *dev_boxes,
unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
// Tail tiles cover fewer than BLOCK_THREADS boxes.
const int row_size = min(n_boxes - row_start * BLOCK_THREADS, BLOCK_THREADS);
const int col_size = min(n_boxes - col_start * BLOCK_THREADS, BLOCK_THREADS);
// Stage the column tile in shared memory; every row thread scans all of it.
__shared__ DType block_boxes[BLOCK_THREADS * 5];
if (threadIdx.x < col_size) {
#pragma unroll
for (int i = 0; i < 5; ++i) {
block_boxes[threadIdx.x * 5 + i] =
dev_boxes[(BLOCK_THREADS * col_start + threadIdx.x) * 5 + i];
}
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = BLOCK_THREADS * row_start + threadIdx.x;
const DType *cur_box = dev_boxes + cur_box_idx * 5;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
// Diagonal tile: only compare against later boxes so suppression is
// one-directional (and a box never suppresses itself).
start = threadIdx.x + 1;
}
for (int i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, BLOCK_THREADS);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Make `device_id` the current CUDA device for the calling host thread,
// skipping the (context-initializing) cudaSetDevice call when it is already
// current. Fixes a mojibake defect: "&current_device" had been corrupted
// into an HTML currency entity, so the address-of operator was lost.
void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // cudaSetDevice must come before any other calls that may perform
  // initialization using the GPU.
  CUDA_CHECK(cudaSetDevice(device_id));
}
// Host driver for GPU non-maximum suppression. Uploads `boxes_num` boxes of
// `boxes_dim` values each, computes pairwise-overlap bitmasks on the device,
// then greedily scans boxes in input order on the host (callers presumably
// pass them score-sorted - confirm), keeping each box not suppressed by an
// earlier kept one. Kept indices go to keep_out; returns their count.
template <typename DType, int BLOCK_THREADS>
int _nms_gpu(int *keep_out, const DType *boxes_host, int boxes_num,
int boxes_dim, DType nms_overlap_thresh, int device_id) {
_set_device(device_id);
DType *boxes_dev = NULL;
unsigned long long *mask_dev = NULL;
// Number of 64-bit mask words needed per box.
const int col_blocks = DIVUP(boxes_num, BLOCK_THREADS);
CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(DType)));
CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host,
boxes_num * boxes_dim * sizeof(DType),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
// 2-D grid: one block per (row tile, column tile) pair of boxes.
dim3 blocks(DIVUP(boxes_num, BLOCK_THREADS), DIVUP(boxes_num, BLOCK_THREADS));
dim3 threads(BLOCK_THREADS);
nms_kernel<DType, BLOCK_THREADS>
<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
// The synchronous copy also serves as the barrier for the kernel above.
CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
// remv accumulates, one bit per box, everything suppressed so far.
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / BLOCK_THREADS;
int inblock = i % BLOCK_THREADS;
if (!(remv[nblock] & (1ULL << inblock))) {
// Box i survives; fold in the boxes it suppresses (later tiles only).
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
return num_to_keep;
}
// template<>
template int _nms_gpu<float, threadsPerBlock>(int *keep_out,
const float *boxes_host,
int boxes_num, int boxes_dim,
float nms_overlap_thresh,
int device_id);
// template<>
template int _nms_gpu<double, threadsPerBlock>(int *keep_out,
const double *boxes_host,
int boxes_num, int boxes_dim,
double nms_overlap_thresh,
int device_id); |
c917c0b35682e735d7a51d15f15699de9f3bc3ec.hip | // !!! This is a file automatically generated by hipify!!!
/*
Troca os valores de posição em um vetor (inverte os valores no vetor).
Exemplo da necessidade da sincronização de threads de um bloco.
Exemplo para alocação dinâmica e estática de shared mem
Quando a função __syncthreads() no kernel está comentada, o resultado fica errado.
Os if's nos for's das saídas dos resultados mostram os casos errados.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 512
// Reverses vetD_glb in place using a statically sized shared-memory staging
// buffer. Expects a single block of exactly N threads; thread t copies
// element t in and writes back the mirrored element N-t-1.
__global__ void staticReverse(int *vetD_glb)
{
__shared__ int vetD_shd[N];
int t = threadIdx.x;
int tr = N-t-1;
vetD_shd[t] = vetD_glb[t];
// Barrier deliberately left commented out: per the file header, this
// example demonstrates the race between the shared write above and the
// mirrored read below when threads are not synchronized.
// __syncthreads();
vetD_glb[t] = vetD_shd[tr];
}
// Same reversal as staticReverse, but the shared buffer is dynamically
// sized: the launch must pass N*sizeof(int) as the shared-memory argument.
__global__ void dynamicReverse(int *vetD_glb)
{
extern __shared__ int vetD_shd[];
int t = threadIdx.x;
int tr = N-t-1;
vetD_shd[t] = vetD_glb[t];
// Barrier deliberately left commented out to demonstrate the data race
// (see file header).
//__syncthreads();
vetD_glb[t] = vetD_shd[tr];
}
// Driver: runs both reversal kernels on [0..N-1] and prints every element
// that does NOT match the expected reversed value in vetCtrl_h. With the
// kernels' __syncthreads() commented out, mismatches are expected.
int main(void)
{
int vetA_h[N], vetCtrl_h[N], vetD_h[N];
int *vetD_d;
int i;
for (i = 0; i < N; i++) {
vetA_h[i] = i; // source
vetCtrl_h[i] = N-i-1; // just to check our results
vetD_h[i] = 0; // target
}
hipMalloc(&vetD_d, N * sizeof(int));
// copy vet a into device global memory
hipMemcpy(vetD_d, vetA_h, N*sizeof(int), hipMemcpyHostToDevice);
// run version with static shared memory
hipLaunchKernelGGL(( staticReverse), dim3(1),dim3(N), 0, 0, vetD_d);
// copy results from device to host memory
hipMemcpy(vetD_h, vetD_d, N*sizeof(int), hipMemcpyDeviceToHost);
// Print only the elements that failed to reverse correctly.
printf("Static Results(%d): ", N);
for (i = 0; i < N; i++)
if (vetD_h[i] != vetCtrl_h[i])
printf("vetD_h[%d]=%d, vetCtrl_h[%d]=%d ", i, vetD_h[i], i, vetCtrl_h[i]);
printf("\nN=%d \n", N);
// *************************************************
// copy again vet a into device global memory
hipMemcpy(vetD_d, vetA_h, N*sizeof(int), hipMemcpyHostToDevice);
// run dynamic shared memory version (third launch arg = shared bytes)
hipLaunchKernelGGL(( dynamicReverse), dim3(1),dim3(N),N*sizeof(int), 0, vetD_d);
// copy results from device to host memory
hipMemcpy(vetD_h, vetD_d, N * sizeof(int), hipMemcpyDeviceToHost);
printf("Dynamic Results(%d): ", N);
for (i = 0; i < N; i++)
if (vetD_h[i] != vetCtrl_h[i])
printf("vetD_h[%d]=%d, vetCtrl_h[%d]=%d ", i, vetD_h[i], i, vetCtrl_h[i]);
printf("\nN=%d \n", N);
// device memory free!!!!
hipFree(vetD_d);
exit(0);
}
| c917c0b35682e735d7a51d15f15699de9f3bc3ec.cu | /*
Troca os valores de posição em um vetor (inverte os valores no vetor).
Exemplo da necessidade da sincronização de threads de um bloco.
Exemplo para alocação dinâmica e estática de shared mem
Quando a função __syncthreads() no kernel está comentada, o resultado fica errado.
Os if's nos for's das saídas dos resultados mostram os casos errados.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 512
// Reverses vetD_glb in place using a statically sized shared-memory staging
// buffer. Expects a single block of exactly N threads; thread t copies
// element t in and writes back the mirrored element N-t-1.
__global__ void staticReverse(int *vetD_glb)
{
__shared__ int vetD_shd[N];
int t = threadIdx.x;
int tr = N-t-1;
vetD_shd[t] = vetD_glb[t];
// Barrier deliberately left commented out: per the file header, this
// example demonstrates the race between the shared write above and the
// mirrored read below when threads are not synchronized.
// __syncthreads();
vetD_glb[t] = vetD_shd[tr];
}
// Same reversal as staticReverse, but the shared buffer is dynamically
// sized: the launch must pass N*sizeof(int) as the shared-memory argument.
__global__ void dynamicReverse(int *vetD_glb)
{
extern __shared__ int vetD_shd[];
int t = threadIdx.x;
int tr = N-t-1;
vetD_shd[t] = vetD_glb[t];
// Barrier deliberately left commented out to demonstrate the data race
// (see file header).
//__syncthreads();
vetD_glb[t] = vetD_shd[tr];
}
// Driver: runs both reversal kernels on [0..N-1] and prints every element
// that does NOT match the expected reversed value in vetCtrl_h. With the
// kernels' __syncthreads() commented out, mismatches are expected.
int main(void)
{
int vetA_h[N], vetCtrl_h[N], vetD_h[N];
int *vetD_d;
int i;
for (i = 0; i < N; i++) {
vetA_h[i] = i; // source
vetCtrl_h[i] = N-i-1; // just to check our results
vetD_h[i] = 0; // target
}
cudaMalloc(&vetD_d, N * sizeof(int));
// copy vet a into device global memory
cudaMemcpy(vetD_d, vetA_h, N*sizeof(int), cudaMemcpyHostToDevice);
// run version with static shared memory
staticReverse<<<1,N>>>(vetD_d);
// copy results from device to host memory
cudaMemcpy(vetD_h, vetD_d, N*sizeof(int), cudaMemcpyDeviceToHost);
// Print only the elements that failed to reverse correctly.
printf("Static Results(%d): ", N);
for (i = 0; i < N; i++)
if (vetD_h[i] != vetCtrl_h[i])
printf("vetD_h[%d]=%d, vetCtrl_h[%d]=%d ", i, vetD_h[i], i, vetCtrl_h[i]);
printf("\nN=%d \n", N);
// *************************************************
// copy again vet a into device global memory
cudaMemcpy(vetD_d, vetA_h, N*sizeof(int), cudaMemcpyHostToDevice);
// run dynamic shared memory version (third launch arg = shared bytes)
dynamicReverse<<<1,N,N*sizeof(int)>>>(vetD_d);
// copy results from device to host memory
cudaMemcpy(vetD_h, vetD_d, N * sizeof(int), cudaMemcpyDeviceToHost);
printf("Dynamic Results(%d): ", N);
for (i = 0; i < N; i++)
if (vetD_h[i] != vetCtrl_h[i])
printf("vetD_h[%d]=%d, vetCtrl_h[%d]=%d ", i, vetD_h[i], i, vetCtrl_h[i]);
printf("\nN=%d \n", N);
// device memory free!!!!
cudaFree(vetD_d);
exit(0);
}
|
08620b5a576fd35ee6659f0512bdaa9947604dac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "CUDACore/cudaCheck.h"
#include "CUDACore/prefixScan.h"
#include "CUDACore/requireDevices.h"
using namespace cms::hip;
template <typename T>
struct format_traits {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %d %d\n";
};
template <>
struct format_traits<float> {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %f %f\n";
};
// Device self-test for the block-level prefix scan. Fills the first `size`
// elements of a shared array with ones, runs both the out-of-place
// (c -> co) and in-place scans, then asserts the inclusive-scan invariant
// c[i] == i + 1 and that both variants agree. `size` must not exceed 1024
// (capacity of the shared arrays); launch with a single block.
template <typename T>
__global__ void testPrefixScan(uint32_t size) {
__shared__ T ws[warpSize];
__shared__ T c[1024];
__shared__ T co[1024];
int first = threadIdx.x;
for (uint32_t i = first; i < size; i += static_cast<uint32_t>(blockDim.x))
c[i] = 1;
__syncthreads();
blockPrefixScan(c, co, size, ws);
blockPrefixScan(c, size, ws);
assert(1 == c[0]);
assert(1 == co[0]);
for (uint32_t i = first + 1; i < size; i += static_cast<uint32_t>(blockDim.x)) {
// Print the failing element before asserting so the context survives.
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, static_cast<int>(blockDim.x), c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] == co[i]);
}
}
// Device self-test for the warp-level prefix scan (out-of-place and
// in-place) on an all-ones array. `size` only bounds the allowed launch
// (asserted <= warpSize); all warpSize lanes participate. Launch with one
// block of warpSize threads.
template <typename T>
__global__ void testWarpPrefixScan(uint32_t size) {
assert(size <= warpSize);
__shared__ T c[1024];
__shared__ T co[1024];
int i = threadIdx.x;
c[i] = 1;
__syncthreads();
warpPrefixScan(c, co, i);
warpPrefixScan(c, i);
__syncthreads();
assert(1 == c[0]);
assert(1 == co[0]);
if (i != 0) {
// Print the failing element before asserting so the context survives.
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, static_cast<int>(blockDim.x), c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] == co[i]);
}
}
// Fill the first n words of v with `val`; the first thread also logs that
// the kernel ran.
__global__ void init(uint32_t *v, uint32_t val, uint32_t n) {
  const auto idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx == 0) {
    printf("init\n");
  }
  if (idx < n) {
    v[idx] = val;
  }
}
// Device-side check that v holds the inclusive prefix sum of ones,
// i.e. v[i] == i + 1 for the first n elements.
__global__ void verify(uint32_t const *v, uint32_t n) {
  const auto idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx == 0) {
    printf("verify\n");
  }
  if (idx < n) {
    assert(v[idx] == idx + 1);
  }
}
// Exercises warp-, block- and multi-block prefix scans; any failure trips a
// device-side assert. Fix applied: the per-iteration device buffers in the
// multi-block section were allocated every pass of the ksize loop and never
// freed (d_out2 was never freed at all) - they are now released each pass.
int main() {
  cms::hiptest::requireDevices();
  // Warp-level scans for several sizes.
  std::cout << "warp level" << std::endl;
  if (warpSize > 32) {
    hipLaunchKernelGGL(( testWarpPrefixScan<int>), dim3(1), dim3(warpSize), 0, 0, 64);
    cudaCheck(hipDeviceSynchronize());
  }
  hipLaunchKernelGGL(( testWarpPrefixScan<int>), dim3(1), dim3(warpSize), 0, 0, 32);
  cudaCheck(hipDeviceSynchronize());
  hipLaunchKernelGGL(( testWarpPrefixScan<int>), dim3(1), dim3(warpSize), 0, 0, 16);
  cudaCheck(hipDeviceSynchronize());
  hipLaunchKernelGGL(( testWarpPrefixScan<int>), dim3(1), dim3(warpSize), 0, 0, 5);
  cudaCheck(hipDeviceSynchronize());
  // Block-level scans: every block size x element count combination.
  std::cout << "block level" << std::endl;
  for (int bs = warpSize; bs <= 1024; bs += warpSize) {
    for (int j = 1; j <= 1024; ++j) {
      hipLaunchKernelGGL(( testPrefixScan<uint16_t>), dim3(1), dim3(bs), 0, 0, j);
      cudaCheck(hipDeviceSynchronize());
      hipLaunchKernelGGL(( testPrefixScan<float>), dim3(1), dim3(bs), 0, 0, j);
      cudaCheck(hipDeviceSynchronize());
    }
  }
  cudaCheck(hipDeviceSynchronize());
  // Multi-block scan over increasingly large inputs (2k, 20k, 200k).
  int num_items = 200;
  for (int ksize = 1; ksize < 4; ++ksize) {
    std::cout << "multiblok" << std::endl;
    num_items *= 10;
    uint32_t *d_in;
    uint32_t *d_out1;
    uint32_t *d_out2;
    cudaCheck(hipMalloc(&d_in, num_items * sizeof(uint32_t)));
    cudaCheck(hipMalloc(&d_out1, num_items * sizeof(uint32_t)));
    cudaCheck(hipMalloc(&d_out2, num_items * sizeof(uint32_t)));
    auto nthreads = 256;
    auto nblocks = (num_items + nthreads - 1) / nthreads;
    hipLaunchKernelGGL(( init), dim3(nblocks), dim3(nthreads), 0, 0, d_in, 1, num_items);
    // the block counter used by multiBlockPrefixScan
    int32_t *d_pc;
    cudaCheck(hipMalloc(&d_pc, sizeof(int32_t)));
    cudaCheck(hipMemset(d_pc, 0, sizeof(int32_t)));
    nthreads = 1024;
    nblocks = (num_items + nthreads - 1) / nthreads;
    std::cout << "launch multiBlockPrefixScan " << num_items << ' ' << nblocks << std::endl;
    hipLaunchKernelGGL(( multiBlockPrefixScan), dim3(nblocks), dim3(nthreads), 4 * nblocks, 0, d_in, d_out1, num_items, d_pc);
    cudaCheck(hipGetLastError());
    hipLaunchKernelGGL(( verify), dim3(nblocks), dim3(nthreads), 0, 0, d_out1, num_items);
    cudaCheck(hipGetLastError());
    cudaCheck(hipDeviceSynchronize());
    // Release the per-iteration buffers (previously leaked every pass).
    cudaCheck(hipFree(d_in));
    cudaCheck(hipFree(d_out1));
    cudaCheck(hipFree(d_out2));
    cudaCheck(hipFree(d_pc));
  }  // ksize
  return 0;
}
| 08620b5a576fd35ee6659f0512bdaa9947604dac.cu | #include "hip/hip_runtime.h"
#include <iostream>
#include "CUDACore/cudaCheck.h"
#include "CUDACore/prefixScan.h"
#include "CUDACore/requireDevices.h"
using namespace cms::hip;
template <typename T>
struct format_traits {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %d %d\n";
};
template <>
struct format_traits<float> {
public:
static const constexpr char *failed_msg = "failed %d %d %d: %f %f\n";
};
// Device self-test for the block-level prefix scan. Fills the first `size`
// elements of a shared array with ones, runs both the out-of-place
// (c -> co) and in-place scans, then asserts the inclusive-scan invariant
// c[i] == i + 1 and that both variants agree. `size` must not exceed 1024
// (capacity of the shared arrays); launch with a single block.
template <typename T>
__global__ void testPrefixScan(uint32_t size) {
__shared__ T ws[warpSize];
__shared__ T c[1024];
__shared__ T co[1024];
int first = threadIdx.x;
for (uint32_t i = first; i < size; i += static_cast<uint32_t>(blockDim.x))
c[i] = 1;
__syncthreads();
blockPrefixScan(c, co, size, ws);
blockPrefixScan(c, size, ws);
assert(1 == c[0]);
assert(1 == co[0]);
for (uint32_t i = first + 1; i < size; i += static_cast<uint32_t>(blockDim.x)) {
// Print the failing element before asserting so the context survives.
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, static_cast<int>(blockDim.x), c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] == co[i]);
}
}
// Device self-test for the warp-level prefix scan (out-of-place and
// in-place) on an all-ones array. `size` only bounds the allowed launch
// (asserted <= warpSize); all warpSize lanes participate. Launch with one
// block of warpSize threads.
template <typename T>
__global__ void testWarpPrefixScan(uint32_t size) {
assert(size <= warpSize);
__shared__ T c[1024];
__shared__ T co[1024];
int i = threadIdx.x;
c[i] = 1;
__syncthreads();
warpPrefixScan(c, co, i);
warpPrefixScan(c, i);
__syncthreads();
assert(1 == c[0]);
assert(1 == co[0]);
if (i != 0) {
// Print the failing element before asserting so the context survives.
if (c[i] != c[i - 1] + 1)
printf(format_traits<T>::failed_msg, size, i, static_cast<int>(blockDim.x), c[i], c[i - 1]);
assert(c[i] == c[i - 1] + 1);
assert(c[i] == i + 1);
assert(c[i] == co[i]);
}
}
// Fill the first n words of v with `val`; the first thread also logs that
// the kernel ran.
__global__ void init(uint32_t *v, uint32_t val, uint32_t n) {
  const auto idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx == 0) {
    printf("init\n");
  }
  if (idx < n) {
    v[idx] = val;
  }
}
// Device-side check that v holds the inclusive prefix sum of ones,
// i.e. v[i] == i + 1 for the first n elements.
__global__ void verify(uint32_t const *v, uint32_t n) {
  const auto idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx == 0) {
    printf("verify\n");
  }
  if (idx < n) {
    assert(v[idx] == idx + 1);
  }
}
// Exercises warp-, block- and multi-block prefix scans; any failure trips a
// device-side assert. Fix applied: the per-iteration device buffers in the
// multi-block section were allocated every pass of the ksize loop and never
// freed (d_out2 was never freed at all) - they are now released each pass.
int main() {
  cms::hiptest::requireDevices();
  // Warp-level scans for several sizes.
  std::cout << "warp level" << std::endl;
  if (warpSize > 32) {
    testWarpPrefixScan<int><<<1, warpSize, 0, 0>>>(64);
    cudaCheck(hipDeviceSynchronize());
  }
  testWarpPrefixScan<int><<<1, warpSize, 0, 0>>>(32);
  cudaCheck(hipDeviceSynchronize());
  testWarpPrefixScan<int><<<1, warpSize, 0, 0>>>(16);
  cudaCheck(hipDeviceSynchronize());
  testWarpPrefixScan<int><<<1, warpSize, 0, 0>>>(5);
  cudaCheck(hipDeviceSynchronize());
  // Block-level scans: every block size x element count combination.
  std::cout << "block level" << std::endl;
  for (int bs = warpSize; bs <= 1024; bs += warpSize) {
    for (int j = 1; j <= 1024; ++j) {
      testPrefixScan<uint16_t><<<1, bs, 0, 0>>>(j);
      cudaCheck(hipDeviceSynchronize());
      testPrefixScan<float><<<1, bs, 0, 0>>>(j);
      cudaCheck(hipDeviceSynchronize());
    }
  }
  cudaCheck(hipDeviceSynchronize());
  // Multi-block scan over increasingly large inputs (2k, 20k, 200k).
  int num_items = 200;
  for (int ksize = 1; ksize < 4; ++ksize) {
    std::cout << "multiblok" << std::endl;
    num_items *= 10;
    uint32_t *d_in;
    uint32_t *d_out1;
    uint32_t *d_out2;
    cudaCheck(hipMalloc(&d_in, num_items * sizeof(uint32_t)));
    cudaCheck(hipMalloc(&d_out1, num_items * sizeof(uint32_t)));
    cudaCheck(hipMalloc(&d_out2, num_items * sizeof(uint32_t)));
    auto nthreads = 256;
    auto nblocks = (num_items + nthreads - 1) / nthreads;
    init<<<nblocks, nthreads, 0, 0>>>(d_in, 1, num_items);
    // the block counter used by multiBlockPrefixScan
    int32_t *d_pc;
    cudaCheck(hipMalloc(&d_pc, sizeof(int32_t)));
    cudaCheck(hipMemset(d_pc, 0, sizeof(int32_t)));
    nthreads = 1024;
    nblocks = (num_items + nthreads - 1) / nthreads;
    std::cout << "launch multiBlockPrefixScan " << num_items << ' ' << nblocks << std::endl;
    multiBlockPrefixScan<<<nblocks, nthreads, 4 * nblocks, 0>>>(d_in, d_out1, num_items, d_pc);
    cudaCheck(hipGetLastError());
    verify<<<nblocks, nthreads, 0, 0>>>(d_out1, num_items);
    cudaCheck(hipGetLastError());
    cudaCheck(hipDeviceSynchronize());
    // Release the per-iteration buffers (previously leaked every pass).
    cudaCheck(hipFree(d_in));
    cudaCheck(hipFree(d_out1));
    cudaCheck(hipFree(d_out2));
    cudaCheck(hipFree(d_pc));
  }  // ksize
  return 0;
}
|
d9aba8ff23f4beb1daa8f8129dc9372a84c80c18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <helper_cuda.h>
using namespace std;
// Adds sqrt(sin^2 x + cos^2 x) (i.e. 1.0 up to rounding) to each element of
// the chunk starting at `offset`; one element per thread.
__global__ void kernel(float* a, int offset)
{
  const int idx = offset + blockIdx.x * blockDim.x + threadIdx.x;
  const float v = (float)idx;
  const float s = sinf(v);
  const float c = cosf(v);
  a[idx] += sqrtf(s * s + c * c);
}
// Demonstrates copy/compute overlap: the n-element array is split into
// nStreams contiguous chunks and each chunk's H2D copy, kernel launch and
// D2H copy are queued on its own stream. Events time the whole pipeline.
int main(int argc, char** argv)
{
const int blockSize = 256, nStreams = 4;
const int n = 1024;
const int streamSize = n / nStreams;
const int streamBytes = streamSize * sizeof(float);
const int bytes = n * sizeof(float);
// allocate pinned host memory and device memory
// (pinned host memory is what makes hipMemcpyAsync asynchronous)
float *a, *d_a;
checkCudaErrors(hipHostMalloc((void**)&a, bytes)); // host pinned
checkCudaErrors(hipMalloc((void**)&d_a, bytes)); // device
// create events and streams
hipEvent_t start, stop;
hipStream_t stream[nStreams];
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
for (int i = 0; i < nStreams; ++i)
checkCudaErrors(hipStreamCreate(&stream[i]));
memset(a, 0, bytes);
checkCudaErrors(hipEventRecord(start));
// One copy-kernel-copy pipeline per stream, each on its own chunk.
for (int i = 0; i < nStreams; ++i) {
int offset = i * streamSize;
checkCudaErrors(
hipMemcpyAsync(&d_a[offset], &a[offset], streamBytes, hipMemcpyHostToDevice, stream[i]));
hipLaunchKernelGGL(( kernel), dim3(streamSize / blockSize), dim3(blockSize), 0, stream[i], d_a, offset);
checkCudaErrors(
hipMemcpyAsync(&a[offset], &d_a[offset], streamBytes, hipMemcpyDeviceToHost, stream[i]));
}
checkCudaErrors(hipEventRecord(stop));
// Wait for the recorded stop event before reading the elapsed time.
checkCudaErrors(hipEventSynchronize(stop));
float milliseconds = 0.f;
checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop));
cout << "Runtime: " << milliseconds << " ms\n";
// cleanup
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
for (int i = 0; i < nStreams; ++i)
checkCudaErrors(hipStreamDestroy(stream[i]));
hipFree(d_a);
hipHostFree(a);
return 0;
}
| d9aba8ff23f4beb1daa8f8129dc9372a84c80c18.cu | #include <iostream>
#include <helper_cuda.h>
using namespace std;
// Adds sqrt(sin^2 x + cos^2 x) (i.e. 1.0 up to rounding) to each element of
// the chunk starting at `offset`; one element per thread.
__global__ void kernel(float* a, int offset)
{
  const int idx = offset + blockIdx.x * blockDim.x + threadIdx.x;
  const float v = (float)idx;
  const float s = sinf(v);
  const float c = cosf(v);
  a[idx] += sqrtf(s * s + c * c);
}
// Demonstrates copy/compute overlap: the n-element array is split into
// nStreams contiguous chunks and each chunk's H2D copy, kernel launch and
// D2H copy are queued on its own stream. Events time the whole pipeline.
int main(int argc, char** argv)
{
const int blockSize = 256, nStreams = 4;
const int n = 1024;
const int streamSize = n / nStreams;
const int streamBytes = streamSize * sizeof(float);
const int bytes = n * sizeof(float);
// allocate pinned host memory and device memory
// (pinned host memory is what makes cudaMemcpyAsync asynchronous)
float *a, *d_a;
checkCudaErrors(cudaMallocHost((void**)&a, bytes)); // host pinned
checkCudaErrors(cudaMalloc((void**)&d_a, bytes)); // device
// create events and streams
cudaEvent_t start, stop;
cudaStream_t stream[nStreams];
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
for (int i = 0; i < nStreams; ++i)
checkCudaErrors(cudaStreamCreate(&stream[i]));
memset(a, 0, bytes);
checkCudaErrors(cudaEventRecord(start));
// One copy-kernel-copy pipeline per stream, each on its own chunk.
for (int i = 0; i < nStreams; ++i) {
int offset = i * streamSize;
checkCudaErrors(
cudaMemcpyAsync(&d_a[offset], &a[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]));
kernel<<<streamSize / blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
checkCudaErrors(
cudaMemcpyAsync(&a[offset], &d_a[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i]));
}
checkCudaErrors(cudaEventRecord(stop));
// Wait for the recorded stop event before reading the elapsed time.
checkCudaErrors(cudaEventSynchronize(stop));
float milliseconds = 0.f;
checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop));
cout << "Runtime: " << milliseconds << " ms\n";
// cleanup
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
for (int i = 0; i < nStreams; ++i)
checkCudaErrors(cudaStreamDestroy(stream[i]));
cudaFree(d_a);
cudaFreeHost(a);
return 0;
}
|
5108e834f373789da71474c2572379e115fc0f37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <cmath>
#include <string>
#include <omp.h>
#include <thread>
#include "diff1d.h"
#include "cuda_helper.h"
#define value_t double
#define index_t int
// constants
__constant__ value_t c_zero, c_one, c_two;
// One explicit step of the 1-D diffusion stencil. Interior points get
// (1 - 2r)*u[j] + r*(u[j-1] + u[j+1]); both ends are pinned to zero.
// The coefficients c_zero/c_one/c_two live in __constant__ memory.
__global__ void kernel(index_t n, value_t r, value_t *u, value_t *u_new)
{
  const index_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n)
    return;
  if (idx == 0 || idx == n - 1)
    u_new[idx] = c_zero;  // Dirichlet boundary
  else
    u_new[idx] = (c_one - c_two * r) * u[idx] + r * (u[idx - 1] + u[idx + 1]);
}
// Benchmark driver: repeatedly applies the diffusion kernel on a ping-pong
// pair of device buffers for `benchtime` seconds of wall time, timing the
// GPU work with events, then checks the final state via test_result.
// Fix applied: the timing events were created but never destroyed.
struct diff1d_l2 : public diff1d<value_t, index_t>
{
  void benchmark()
  {
    print_bench();
    // Host buffers holding the initial condition.
    value_t *u = new value_t[total_size];
    value_t *u_new = new value_t[total_size];
    initial_condition(u, u_new);
    // Device ping-pong pair.
    value_t *d_u, *d_u_new;
    checkCudaErrors(hipMalloc(&d_u, total_size * sizeof(value_t)));
    checkCudaErrors(hipMalloc(&d_u_new, total_size * sizeof(value_t)));
    hipEvent_t start, stop;
    checkCudaErrors(hipEventCreate(&start));
    checkCudaErrors(hipEventCreate(&stop));
    checkCudaErrors(hipMemcpy(d_u, u, total_size * sizeof(value_t), hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_u_new, u_new, total_size * sizeof(value_t), hipMemcpyHostToDevice));
    // Upload the stencil coefficients into __constant__ memory.
    value_t zero = 0.0;
    value_t one = 1.0;
    value_t two = 2.0;
    checkCudaErrors(hipMemcpyToSymbol(c_zero, &zero, sizeof(value_t), 0, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpyToSymbol(c_one, &one, sizeof(value_t), 0, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpyToSymbol(c_two, &two, sizeof(value_t), 0, hipMemcpyHostToDevice));
    dim3 blockd3 = dim3(block, 1, 1);
    dim3 grid = calc_grid1d(blockd3, total_size);
    std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
              << " Grid size: " << grid.x << "\n\n";
    loops = 0;
    auto startcpu = std::chrono::high_resolution_clock::now();
    checkCudaErrors(hipEventRecord(start));
    // Keep stepping until `benchtime` seconds of wall time have elapsed.
    while ((std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::high_resolution_clock::now() - startcpu)
                .count()) < 1000.0 * benchtime)
    {
      hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, total_size, r, d_u, d_u_new);
      checkCudaErrorsAfterKernels;
      // swap u and u_new
      value_t *tmp = d_u;
      d_u = d_u_new;
      d_u_new = tmp;
      loops++;
    }
    checkCudaErrors(hipEventRecord(stop));
    checkCudaErrors(hipEventSynchronize(stop));
    float du = 0;
    checkCudaErrors(hipEventElapsedTime(&du, start, stop));
    duration = 1.0e-3 * du;  // event time is in ms; store seconds
    checkCudaErrors(hipMemcpy(u, d_u, total_size * sizeof(value_t), hipMemcpyDeviceToHost));
    value_t t = delta_t * value_t(loops);
    test_result(u, t);
    print_performance();
    delete[] u;
    delete[] u_new;
    checkCudaErrors(hipFree(d_u));
    checkCudaErrors(hipFree(d_u_new));
    // Fix: release the timing events (previously leaked).
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));
  }
  diff1d_l2(int narg, char **arg) : diff1d<value_t, index_t>(narg, arg) {}
};
int main(int narg, char **arg)
{
check_cuda_device();
diff1d_l2 test(narg, arg);
test.benchmark();
} | 5108e834f373789da71474c2572379e115fc0f37.cu | #include <chrono>
#include <cmath>
#include <string>
#include <omp.h>
#include <thread>
#include "diff1d.h"
#include "cuda_helper.h"
#define value_t double
#define index_t int
// constants
__constant__ value_t c_zero, c_one, c_two;
// One explicit step of the 1-D diffusion stencil. Interior points get
// (1 - 2r)*u[j] + r*(u[j-1] + u[j+1]); both ends are pinned to zero.
// The coefficients c_zero/c_one/c_two live in __constant__ memory.
__global__ void kernel(index_t n, value_t r, value_t *u, value_t *u_new)
{
  const index_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n)
    return;
  if (idx == 0 || idx == n - 1)
    u_new[idx] = c_zero;  // Dirichlet boundary
  else
    u_new[idx] = (c_one - c_two * r) * u[idx] + r * (u[idx - 1] + u[idx + 1]);
}
// Benchmark driver: repeatedly applies the diffusion kernel on a ping-pong
// pair of device buffers for `benchtime` seconds of wall time, timing the
// GPU work with events, then checks the final state via test_result.
// Fix applied: the timing events were created but never destroyed.
struct diff1d_l2 : public diff1d<value_t, index_t>
{
  void benchmark()
  {
    print_bench();
    // Host buffers holding the initial condition.
    value_t *u = new value_t[total_size];
    value_t *u_new = new value_t[total_size];
    initial_condition(u, u_new);
    // Device ping-pong pair.
    value_t *d_u, *d_u_new;
    checkCudaErrors(cudaMalloc(&d_u, total_size * sizeof(value_t)));
    checkCudaErrors(cudaMalloc(&d_u_new, total_size * sizeof(value_t)));
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    checkCudaErrors(cudaMemcpy(d_u, u, total_size * sizeof(value_t), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_u_new, u_new, total_size * sizeof(value_t), cudaMemcpyHostToDevice));
    // Upload the stencil coefficients into __constant__ memory.
    value_t zero = 0.0;
    value_t one = 1.0;
    value_t two = 2.0;
    checkCudaErrors(cudaMemcpyToSymbol(c_zero, &zero, sizeof(value_t), 0, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpyToSymbol(c_one, &one, sizeof(value_t), 0, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpyToSymbol(c_two, &two, sizeof(value_t), 0, cudaMemcpyHostToDevice));
    dim3 blockd3 = dim3(block, 1, 1);
    dim3 grid = calc_grid1d(blockd3, total_size);
    std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
              << " Grid size: " << grid.x << "\n\n";
    loops = 0;
    auto startcpu = std::chrono::high_resolution_clock::now();
    checkCudaErrors(cudaEventRecord(start));
    // Keep stepping until `benchtime` seconds of wall time have elapsed.
    while ((std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::high_resolution_clock::now() - startcpu)
                .count()) < 1000.0 * benchtime)
    {
      kernel<<<grid, block>>>(total_size, r, d_u, d_u_new);
      checkCudaErrorsAfterKernels;
      // swap u and u_new
      value_t *tmp = d_u;
      d_u = d_u_new;
      d_u_new = tmp;
      loops++;
    }
    checkCudaErrors(cudaEventRecord(stop));
    checkCudaErrors(cudaEventSynchronize(stop));
    float du = 0;
    checkCudaErrors(cudaEventElapsedTime(&du, start, stop));
    duration = 1.0e-3 * du;  // event time is in ms; store seconds
    checkCudaErrors(cudaMemcpy(u, d_u, total_size * sizeof(value_t), cudaMemcpyDeviceToHost));
    value_t t = delta_t * value_t(loops);
    test_result(u, t);
    print_performance();
    delete[] u;
    delete[] u_new;
    checkCudaErrors(cudaFree(d_u));
    checkCudaErrors(cudaFree(d_u_new));
    // Fix: release the timing events (previously leaked).
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
  }
  diff1d_l2(int narg, char **arg) : diff1d<value_t, index_t>(narg, arg) {}
};
int main(int narg, char **arg)
{
check_cuda_device();
diff1d_l2 test(narg, arg);
test.benchmark();
} |
1ec267c378b71f56645104b7f4a0f3d0eaf2e8f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <jni.h>
#include "../../../../src/common/device_helpers.cuh"
#include "../../../../src/common/cuda_pinned_allocator.h"
#include "../../../../src/data/array_interface.h"
#include "jvm_utils.h"
#include <xgboost/c_api.h>
namespace xgboost {
namespace jni {
// RawPtr: uniform raw-pointer accessor over host and device containers.
// std::vector exposes the pointer via data(); dh::device_vector's data()
// returns a device_ptr that must additionally be unwrapped with .get().
template <typename T, typename Alloc>
T const *RawPtr(std::vector<T, Alloc> const &data) {
return data.data();
}
template <typename T, typename Alloc> T *RawPtr(std::vector<T, Alloc> &data) {
return data.data();
}
template <typename T> T const *RawPtr(dh::device_vector<T> const &data) {
return data.data().get();
}
template <typename T> T *RawPtr(dh::device_vector<T> &data) {
return data.data().get();
}
// Passes through the result of a JNI call, logging the pending JVM
// exception when the result is falsy (a null/zero JNI result here implies
// an exception occurred, which CHECK enforces). Returns v unchanged so the
// helper can wrap calls inline.
template <typename T> T CheckJvmCall(T const &v, JNIEnv *jenv) {
if (!v) {
CHECK(jenv->ExceptionOccurred());
jenv->ExceptionDescribe();
}
return v;
}
// Copies the validity (null) mask of column `c` into *p_mask and emits the
// matching "mask" entry of the array-interface JSON in *p_out, pointing at
// the new buffer. The mask's shape/typestr metadata is rebuilt from the
// source column's JSON; only 1-D and 2-D mask shapes are accepted.
// The memcpy is asynchronous on `stream`; the caller owns synchronization.
template <typename VCont>
void CopyColumnMask(xgboost::ArrayInterface<1> const &interface,
std::vector<Json> const &columns, hipMemcpyKind kind,
size_t c, VCont *p_mask, Json *p_out, hipStream_t stream) {
auto &mask = *p_mask;
auto &out = *p_out;
// One mask element per entry of the column.
auto size = sizeof(typename VCont::value_type) * interface.n;
mask.resize(size);
CHECK(RawPtr(mask));
CHECK(size);
CHECK(interface.valid.Data());
dh::safe_cuda(
hipMemcpyAsync(RawPtr(mask), interface.valid.Data(), size, kind, stream));
auto const &mask_column = columns[c]["mask"];
// Rebuild the array-interface "mask" object against the copied buffer.
out["mask"] = Object();
std::vector<Json> mask_data{
Json{reinterpret_cast<Integer::Int>(RawPtr(mask))},
Json{get<Boolean const>(mask_column["data"][1])}};
out["mask"]["data"] = Array(std::move(mask_data));
if (get<Array const>(mask_column["shape"]).size() == 2) {
std::vector<Json> mask_shape{
Json{get<Integer const>(mask_column["shape"][0])},
Json{get<Integer const>(mask_column["shape"][1])}};
out["mask"]["shape"] = Array(std::move(mask_shape));
} else if (get<Array const>(mask_column["shape"]).size() == 1) {
std::vector<Json> mask_shape{
Json{get<Integer const>(mask_column["shape"][0])}};
out["mask"]["shape"] = Array(std::move(mask_shape));
} else {
LOG(FATAL) << "Invalid shape of mask";
}
out["mask"]["typestr"] = String("<t1");
out["mask"]["version"] = Integer(3);
}
// Copies every column described by `interface_arr` into the container vectors
// *p_data / *p_mask (direction given by `kind`) and rebuilds per-column
// array-interface JSON in *p_out that points at the copies.  Used both for
// device->host staging and host->device replay.
// NOTE(review): copies are asynchronous on `stream`; the caller is
// responsible for synchronizing before the copies are consumed.
template <typename DCont, typename VCont>
void CopyInterface(std::vector<xgboost::ArrayInterface<1>> &interface_arr,
std::vector<Json> const &columns, hipMemcpyKind kind,
std::vector<DCont> *p_data, std::vector<VCont> *p_mask,
std::vector<xgboost::Json> *p_out, hipStream_t stream) {
p_data->resize(interface_arr.size());
p_mask->resize(interface_arr.size());
p_out->resize(interface_arr.size());
for (size_t c = 0; c < interface_arr.size(); ++c) {
auto &interface = interface_arr.at(c);
size_t element_size = interface.ElementSize();
size_t size = element_size * interface.n;
auto &data = (*p_data)[c];
auto &mask = (*p_mask)[c];
data.resize(size);
dh::safe_cuda(hipMemcpyAsync(RawPtr(data), interface.data, size, kind, stream));
// Rebuild the column's array interface so that it references the copy.
auto &out = (*p_out)[c];
out = Object();
std::vector<Json> j_data{
Json{Integer(reinterpret_cast<Integer::Int>(RawPtr(data)))},
Json{Boolean{false}}};
out["data"] = Array(std::move(j_data));
out["shape"] = Array(std::vector<Json>{Json(Integer(interface.Shape(0)))});
if (interface.valid.Data()) {
CopyColumnMask(interface, columns, kind, c, &mask, &out, stream);
}
// NOTE(review): typestr is hard-coded to float32 even though element_size
// comes from the source interface -- assumes all feature columns are f4.
out["typestr"] = String("<f4");
out["version"] = Integer(3);
}
}
// Copies a single meta-info column (label/weight/base_margin) device-to-device
// into *out and patches the JSON interface in place so its data pointer
// references the copy.  Expects *p_interface to be a one-element array.
// NOTE(review): the copy is async on `stream`; callers must synchronize
// before the staged buffer is read.
void CopyMetaInfo(Json *p_interface, dh::device_vector<float> *out, hipStream_t stream) {
auto &j_interface = *p_interface;
CHECK_EQ(get<Array const>(j_interface).size(), 1);
auto object = get<Object>(get<Array>(j_interface)[0]);
ArrayInterface<1> interface(object);
out->resize(interface.Shape(0));
size_t element_size = interface.ElementSize();
size_t size = element_size * interface.n;
dh::safe_cuda(hipMemcpyAsync(RawPtr(*out), interface.data, size,
hipMemcpyDeviceToDevice, stream));
// Redirect the interface's data pointer to the staged copy.
j_interface[0]["data"][0] = reinterpret_cast<Integer::Int>(RawPtr(*out));
}
// One staged cudf batch: per-column data buffers, per-column validity masks,
// and the per-column array-interface JSON describing them.
template <typename DCont, typename VCont> struct DataFrame {
std::vector<DCont> data;
std::vector<VCont> valid;
std::vector<Json> interfaces;
};
class DataIteratorProxy {
DMatrixHandle proxy_;
JNIEnv *jenv_;
int jni_status_;
jobject jiter_;
bool cache_on_host_{true}; // TODO(Bobby): Make this optional.
template <typename T>
using Alloc = xgboost::common::cuda::pinned_allocator<T>;
template <typename U>
using HostVector = std::vector<U, Alloc<U>>;
// This vector is created for staging device data on host to save GPU memory.
// When space is not of concern, we can stage them on device memory directly.
std::vector<
std::unique_ptr<DataFrame<HostVector<char>, HostVector<std::uint8_t>>>>
host_columns_;
// TODO(Bobby): Use this instead of `host_columns_` if staging is not
// required.
std::vector<std::unique_ptr<DataFrame<dh::device_vector<char>,
dh::device_vector<std::uint8_t>>>>
device_columns_;
// Staging area for metainfo.
// TODO(Bobby): label_upper_bound, label_lower_bound, group.
std::vector<std::unique_ptr<dh::device_vector<float>>> labels_;
std::vector<std::unique_ptr<dh::device_vector<float>>> weights_;
std::vector<std::unique_ptr<dh::device_vector<float>>> base_margins_;
std::vector<Json> label_interfaces_;
std::vector<Json> weight_interfaces_;
std::vector<Json> margin_interfaces_;
size_t it_{0};
size_t n_batches_{0};
bool initialized_{false};
jobject last_batch_ {nullptr};
// Temp buffer on device, each `dh::device_vector` represents a column
// from cudf.
std::vector<dh::device_vector<char>> staging_data_;
std::vector<dh::device_vector<uint8_t>> staging_mask_;
hipStream_t copy_stream_;
public:
explicit DataIteratorProxy(jobject jiter, bool cache_on_host = true)
: jiter_{jiter}, cache_on_host_{cache_on_host} {
XGProxyDMatrixCreate(&proxy_);
jni_status_ =
GlobalJvm()->GetEnv(reinterpret_cast<void **>(&jenv_), JNI_VERSION_1_6);
this->Reset();
dh::safe_cuda(hipStreamCreateWithFlags(©_stream_, hipStreamNonBlocking));
}
~DataIteratorProxy() { XGDMatrixFree(proxy_);
dh::safe_cuda(hipStreamDestroy(copy_stream_));
}
DMatrixHandle GetDMatrixHandle() const { return proxy_; }
// Helper function for staging meta info.
void StageMetaInfo(Json json_interface) {
CHECK(!IsA<Null>(json_interface));
auto json_map = get<Object const>(json_interface);
if (json_map.find("label_str") == json_map.cend()) {
LOG(FATAL) << "Must have a label field.";
}
Json label = json_interface["label_str"];
CHECK(!IsA<Null>(label));
labels_.emplace_back(new dh::device_vector<float>);
CopyMetaInfo(&label, labels_.back().get(), copy_stream_);
label_interfaces_.emplace_back(label);
std::string str;
Json::Dump(label, &str);
XGDMatrixSetInfoFromInterface(proxy_, "label", str.c_str());
if (json_map.find("weight_str") != json_map.cend()) {
Json weight = json_interface["weight_str"];
CHECK(!IsA<Null>(weight));
weights_.emplace_back(new dh::device_vector<float>);
CopyMetaInfo(&weight, weights_.back().get(), copy_stream_);
weight_interfaces_.emplace_back(weight);
Json::Dump(weight, &str);
XGDMatrixSetInfoFromInterface(proxy_, "weight", str.c_str());
}
if (json_map.find("basemargin_str") != json_map.cend()) {
Json basemargin = json_interface["basemargin_str"];
base_margins_.emplace_back(new dh::device_vector<float>);
CopyMetaInfo(&basemargin, base_margins_.back().get(), copy_stream_);
margin_interfaces_.emplace_back(basemargin);
Json::Dump(basemargin, &str);
XGDMatrixSetInfoFromInterface(proxy_, "base_margin", str.c_str());
}
}
void CloseJvmBatch() {
if (last_batch_) {
jclass batch_class = CheckJvmCall(jenv_->GetObjectClass(last_batch_), jenv_);
jmethodID closeMethod = CheckJvmCall(jenv_->GetMethodID(batch_class, "close", "()V"), jenv_);
jenv_->CallVoidMethod(last_batch_, closeMethod);
last_batch_ = nullptr;
}
}
void Reset() {
it_ = 0;
this->CloseJvmBatch();
}
int32_t PullIterFromJVM() {
jclass iterClass = jenv_->FindClass("java/util/Iterator");
this->CloseJvmBatch();
jmethodID has_next =
CheckJvmCall(jenv_->GetMethodID(iterClass, "hasNext", "()Z"), jenv_);
jmethodID next = CheckJvmCall(
jenv_->GetMethodID(iterClass, "next", "()Ljava/lang/Object;"), jenv_);
if (jenv_->CallBooleanMethod(jiter_, has_next)) {
// batch should be ColumnBatch from jvm
jobject batch = CheckJvmCall(jenv_->CallObjectMethod(jiter_, next), jenv_);
jclass batch_class = CheckJvmCall(jenv_->GetObjectClass(batch), jenv_);
jmethodID getArrayInterfaceJson = CheckJvmCall(jenv_->GetMethodID(
batch_class, "getArrayInterfaceJson", "()Ljava/lang/String;"), jenv_);
auto jinterface =
static_cast<jstring>(jenv_->CallObjectMethod(batch, getArrayInterfaceJson));
CheckJvmCall(jinterface, jenv_);
char const *c_interface_str =
CheckJvmCall(jenv_->GetStringUTFChars(jinterface, nullptr), jenv_);
StageData(c_interface_str);
jenv_->ReleaseStringUTFChars(jinterface, c_interface_str);
last_batch_ = batch;
return 1;
} else {
return 0;
}
}
void StageData(std::string interface_str) {
++n_batches_;
// DataFrame
using T = decltype(host_columns_)::value_type::element_type;
host_columns_.emplace_back(std::unique_ptr<T>(new T));
// Stage the meta info.
auto json_interface =
Json::Load({interface_str.c_str(), interface_str.size()});
CHECK(!IsA<Null>(json_interface));
StageMetaInfo(json_interface);
Json features = json_interface["features_str"];
auto json_columns = get<Array const>(features);
std::vector<ArrayInterface<1>> interfaces;
// Stage the data
for (auto &json_col : json_columns) {
auto column = ArrayInterface<1>(get<Object const>(json_col));
interfaces.emplace_back(column);
}
Json::Dump(features, &interface_str);
CopyInterface(interfaces, json_columns, hipMemcpyDeviceToHost,
&host_columns_.back()->data, &host_columns_.back()->valid,
&host_columns_.back()->interfaces, copy_stream_);
XGProxyDMatrixSetDataCudaColumnar(proxy_, interface_str.c_str());
it_++;
}
int NextFirstLoop() {
try {
dh::safe_cuda(hipStreamSynchronize(copy_stream_));
if (this->PullIterFromJVM()) {
return 1;
} else {
initialized_ = true;
return 0;
}
} catch (dmlc::Error const &e) {
if (jni_status_ == JNI_EDETACHED) {
GlobalJvm()->DetachCurrentThread();
}
LOG(FATAL) << e.what();
}
LOG(FATAL) << "Unreachable";
return 1;
}
int NextSecondLoop() {
std::string str;
// Meta
auto const &label = this->label_interfaces_.at(it_);
Json::Dump(label, &str);
XGDMatrixSetInfoFromInterface(proxy_, "label", str.c_str());
if (n_batches_ == this->weight_interfaces_.size()) {
auto const &weight = this->weight_interfaces_.at(it_);
Json::Dump(weight, &str);
XGDMatrixSetInfoFromInterface(proxy_, "weight", str.c_str());
}
if (n_batches_ == this->margin_interfaces_.size()) {
auto const &base_margin = this->margin_interfaces_.at(it_);
Json::Dump(base_margin, &str);
XGDMatrixSetInfoFromInterface(proxy_, "base_margin", str.c_str());
}
// Data
auto const &json_interface = host_columns_.at(it_)->interfaces;
std::vector<ArrayInterface<1>> in;
for (auto interface : json_interface) {
auto column = ArrayInterface<1>(get<Object const>(interface));
in.emplace_back(column);
}
std::vector<Json> out;
CopyInterface(in, json_interface, hipMemcpyHostToDevice, &staging_data_,
&staging_mask_, &out, nullptr);
Json temp{Array(std::move(out))};
std::string interface_str;
Json::Dump(temp, &interface_str);
XGProxyDMatrixSetDataCudaColumnar(proxy_, interface_str.c_str());
it_++;
return 1;
}
int Next() {
if (!initialized_) {
return NextFirstLoop();
} else {
if (it_ == n_batches_) {
return 0;
}
return NextSecondLoop();
}
};
};
// C-style trampolines with the signatures required by the XGBoost callback
// API; they forward to the DataIteratorProxy instance carried in `self`.
namespace {
void Reset(DataIterHandle self) {
static_cast<xgboost::jni::DataIteratorProxy *>(self)->Reset();
}
int Next(DataIterHandle self) {
return static_cast<xgboost::jni::DataIteratorProxy *>(self)->Next();
}
} // anonymous namespace
// JNI-facing entry point: builds a DeviceQuantileDMatrix by iterating `jiter`
// through the proxy, then stores the resulting handle into `jout`.
// Returns the XGBoost C-API status code.
XGB_DLL jint XGDeviceQuantileDMatrixCreateFromCallbackImpl(JNIEnv *jenv, jclass jcls,
jobject jiter,
jfloat jmissing,
jint jmax_bin, jint jnthread,
jlongArray jout) {
xgboost::jni::DataIteratorProxy proxy(jiter);
DMatrixHandle result;
// NOTE(review): argument order (missing, nthread, max_bin) must match the
// C-API declaration -- confirm against xgboost/c_api.h.
auto ret = XGDeviceQuantileDMatrixCreateFromCallback(
&proxy, proxy.GetDMatrixHandle(), Reset, Next, jmissing, jnthread,
jmax_bin, &result);
setHandle(jenv, jout, result);
return ret;
}
} // namespace jni
} // namespace xgboost
| 1ec267c378b71f56645104b7f4a0f3d0eaf2e8f1.cu | #include <jni.h>
#include "../../../../src/common/device_helpers.cuh"
#include "../../../../src/common/cuda_pinned_allocator.h"
#include "../../../../src/data/array_interface.h"
#include "jvm_utils.h"
#include <xgboost/c_api.h>
namespace xgboost {
namespace jni {
// RawPtr: uniform raw-pointer accessor for host std::vector (any allocator)
// and thrust-backed dh::device_vector containers.
template <typename T, typename Alloc>
T const *RawPtr(std::vector<T, Alloc> const &data) {
return data.data();
}
template <typename T, typename Alloc> T *RawPtr(std::vector<T, Alloc> &data) {
return data.data();
}
// device_vector::data() yields a thrust device_ptr; .get() unwraps it.
template <typename T> T const *RawPtr(dh::device_vector<T> const &data) {
return data.data().get();
}
template <typename T> T *RawPtr(dh::device_vector<T> &data) {
return data.data().get();
}
// Validates a JNI call result: on a falsy value, assert that a JVM exception
// is pending and print its stack trace.  Returns the value unchanged.
template <typename T> T CheckJvmCall(T const &v, JNIEnv *jenv) {
if (!v) {
CHECK(jenv->ExceptionOccurred());
jenv->ExceptionDescribe();
}
return v;
}
// Copies column `c`'s validity mask into *p_mask (direction per `kind`) and
// writes a matching "mask" sub-object into the array-interface JSON *p_out.
// NOTE(review): async on `stream` -- caller must synchronize before use.
// NOTE(review): `size` assumes a byte-per-row validity buffer, not a packed
// bitmask; confirm against ArrayInterface::valid.
template <typename VCont>
void CopyColumnMask(xgboost::ArrayInterface<1> const &interface,
std::vector<Json> const &columns, cudaMemcpyKind kind,
size_t c, VCont *p_mask, Json *p_out, cudaStream_t stream) {
auto &mask = *p_mask;
auto &out = *p_out;
auto size = sizeof(typename VCont::value_type) * interface.n;
mask.resize(size);
CHECK(RawPtr(mask));
CHECK(size);
CHECK(interface.valid.Data());
dh::safe_cuda(
cudaMemcpyAsync(RawPtr(mask), interface.valid.Data(), size, kind, stream));
auto const &mask_column = columns[c]["mask"];
out["mask"] = Object();
// data = [pointer, read_only_flag]; flag is forwarded from the source.
std::vector<Json> mask_data{
Json{reinterpret_cast<Integer::Int>(RawPtr(mask))},
Json{get<Boolean const>(mask_column["data"][1])}};
out["mask"]["data"] = Array(std::move(mask_data));
// Preserve the source mask's 1-D or 2-D shape; anything else is fatal.
if (get<Array const>(mask_column["shape"]).size() == 2) {
std::vector<Json> mask_shape{
Json{get<Integer const>(mask_column["shape"][0])},
Json{get<Integer const>(mask_column["shape"][1])}};
out["mask"]["shape"] = Array(std::move(mask_shape));
} else if (get<Array const>(mask_column["shape"]).size() == 1) {
std::vector<Json> mask_shape{
Json{get<Integer const>(mask_column["shape"][0])}};
out["mask"]["shape"] = Array(std::move(mask_shape));
} else {
LOG(FATAL) << "Invalid shape of mask";
}
out["mask"]["typestr"] = String("<t1");
out["mask"]["version"] = Integer(3);
}
// Copies each column's data (and mask, if present) into *p_data / *p_mask and
// rebuilds per-column array-interface JSON in *p_out pointing at the copies.
// Used for both device->host staging and host->device replay.
// NOTE(review): copies are async on `stream`; caller synchronizes.
template <typename DCont, typename VCont>
void CopyInterface(std::vector<xgboost::ArrayInterface<1>> &interface_arr,
std::vector<Json> const &columns, cudaMemcpyKind kind,
std::vector<DCont> *p_data, std::vector<VCont> *p_mask,
std::vector<xgboost::Json> *p_out, cudaStream_t stream) {
p_data->resize(interface_arr.size());
p_mask->resize(interface_arr.size());
p_out->resize(interface_arr.size());
for (size_t c = 0; c < interface_arr.size(); ++c) {
auto &interface = interface_arr.at(c);
size_t element_size = interface.ElementSize();
size_t size = element_size * interface.n;
auto &data = (*p_data)[c];
auto &mask = (*p_mask)[c];
data.resize(size);
dh::safe_cuda(cudaMemcpyAsync(RawPtr(data), interface.data, size, kind, stream));
// Rebuild the column's interface so it references the copy.
auto &out = (*p_out)[c];
out = Object();
std::vector<Json> j_data{
Json{Integer(reinterpret_cast<Integer::Int>(RawPtr(data)))},
Json{Boolean{false}}};
out["data"] = Array(std::move(j_data));
out["shape"] = Array(std::vector<Json>{Json(Integer(interface.Shape(0)))});
if (interface.valid.Data()) {
CopyColumnMask(interface, columns, kind, c, &mask, &out, stream);
}
// NOTE(review): typestr hard-coded to f4; assumes float32 feature columns.
out["typestr"] = String("<f4");
out["version"] = Integer(3);
}
}
// Copies one meta-info column device-to-device into *out and patches the
// JSON interface in place to reference the copy.  Expects a 1-element array.
// NOTE(review): async on `stream`; caller must synchronize before reading.
void CopyMetaInfo(Json *p_interface, dh::device_vector<float> *out, cudaStream_t stream) {
auto &j_interface = *p_interface;
CHECK_EQ(get<Array const>(j_interface).size(), 1);
auto object = get<Object>(get<Array>(j_interface)[0]);
ArrayInterface<1> interface(object);
out->resize(interface.Shape(0));
size_t element_size = interface.ElementSize();
size_t size = element_size * interface.n;
dh::safe_cuda(cudaMemcpyAsync(RawPtr(*out), interface.data, size,
cudaMemcpyDeviceToDevice, stream));
// Redirect the interface's data pointer to the staged copy.
j_interface[0]["data"][0] = reinterpret_cast<Integer::Int>(RawPtr(*out));
}
// One staged cudf batch: per-column data, validity masks, and the
// array-interface JSON describing them.
template <typename DCont, typename VCont> struct DataFrame {
std::vector<DCont> data;
std::vector<VCont> valid;
std::vector<Json> interfaces;
};
class DataIteratorProxy {
DMatrixHandle proxy_;
JNIEnv *jenv_;
int jni_status_;
jobject jiter_;
bool cache_on_host_{true}; // TODO(Bobby): Make this optional.
template <typename T>
using Alloc = xgboost::common::cuda::pinned_allocator<T>;
template <typename U>
using HostVector = std::vector<U, Alloc<U>>;
// This vector is created for staging device data on host to save GPU memory.
// When space is not of concern, we can stage them on device memory directly.
std::vector<
std::unique_ptr<DataFrame<HostVector<char>, HostVector<std::uint8_t>>>>
host_columns_;
// TODO(Bobby): Use this instead of `host_columns_` if staging is not
// required.
std::vector<std::unique_ptr<DataFrame<dh::device_vector<char>,
dh::device_vector<std::uint8_t>>>>
device_columns_;
// Staging area for metainfo.
// TODO(Bobby): label_upper_bound, label_lower_bound, group.
std::vector<std::unique_ptr<dh::device_vector<float>>> labels_;
std::vector<std::unique_ptr<dh::device_vector<float>>> weights_;
std::vector<std::unique_ptr<dh::device_vector<float>>> base_margins_;
std::vector<Json> label_interfaces_;
std::vector<Json> weight_interfaces_;
std::vector<Json> margin_interfaces_;
size_t it_{0};
size_t n_batches_{0};
bool initialized_{false};
jobject last_batch_ {nullptr};
// Temp buffer on device, each `dh::device_vector` represents a column
// from cudf.
std::vector<dh::device_vector<char>> staging_data_;
std::vector<dh::device_vector<uint8_t>> staging_mask_;
cudaStream_t copy_stream_;
public:
explicit DataIteratorProxy(jobject jiter, bool cache_on_host = true)
: jiter_{jiter}, cache_on_host_{cache_on_host} {
XGProxyDMatrixCreate(&proxy_);
jni_status_ =
GlobalJvm()->GetEnv(reinterpret_cast<void **>(&jenv_), JNI_VERSION_1_6);
this->Reset();
dh::safe_cuda(cudaStreamCreateWithFlags(©_stream_, cudaStreamNonBlocking));
}
~DataIteratorProxy() { XGDMatrixFree(proxy_);
dh::safe_cuda(cudaStreamDestroy(copy_stream_));
}
DMatrixHandle GetDMatrixHandle() const { return proxy_; }
// Helper function for staging meta info.
void StageMetaInfo(Json json_interface) {
CHECK(!IsA<Null>(json_interface));
auto json_map = get<Object const>(json_interface);
if (json_map.find("label_str") == json_map.cend()) {
LOG(FATAL) << "Must have a label field.";
}
Json label = json_interface["label_str"];
CHECK(!IsA<Null>(label));
labels_.emplace_back(new dh::device_vector<float>);
CopyMetaInfo(&label, labels_.back().get(), copy_stream_);
label_interfaces_.emplace_back(label);
std::string str;
Json::Dump(label, &str);
XGDMatrixSetInfoFromInterface(proxy_, "label", str.c_str());
if (json_map.find("weight_str") != json_map.cend()) {
Json weight = json_interface["weight_str"];
CHECK(!IsA<Null>(weight));
weights_.emplace_back(new dh::device_vector<float>);
CopyMetaInfo(&weight, weights_.back().get(), copy_stream_);
weight_interfaces_.emplace_back(weight);
Json::Dump(weight, &str);
XGDMatrixSetInfoFromInterface(proxy_, "weight", str.c_str());
}
if (json_map.find("basemargin_str") != json_map.cend()) {
Json basemargin = json_interface["basemargin_str"];
base_margins_.emplace_back(new dh::device_vector<float>);
CopyMetaInfo(&basemargin, base_margins_.back().get(), copy_stream_);
margin_interfaces_.emplace_back(basemargin);
Json::Dump(basemargin, &str);
XGDMatrixSetInfoFromInterface(proxy_, "base_margin", str.c_str());
}
}
void CloseJvmBatch() {
if (last_batch_) {
jclass batch_class = CheckJvmCall(jenv_->GetObjectClass(last_batch_), jenv_);
jmethodID closeMethod = CheckJvmCall(jenv_->GetMethodID(batch_class, "close", "()V"), jenv_);
jenv_->CallVoidMethod(last_batch_, closeMethod);
last_batch_ = nullptr;
}
}
void Reset() {
it_ = 0;
this->CloseJvmBatch();
}
int32_t PullIterFromJVM() {
jclass iterClass = jenv_->FindClass("java/util/Iterator");
this->CloseJvmBatch();
jmethodID has_next =
CheckJvmCall(jenv_->GetMethodID(iterClass, "hasNext", "()Z"), jenv_);
jmethodID next = CheckJvmCall(
jenv_->GetMethodID(iterClass, "next", "()Ljava/lang/Object;"), jenv_);
if (jenv_->CallBooleanMethod(jiter_, has_next)) {
// batch should be ColumnBatch from jvm
jobject batch = CheckJvmCall(jenv_->CallObjectMethod(jiter_, next), jenv_);
jclass batch_class = CheckJvmCall(jenv_->GetObjectClass(batch), jenv_);
jmethodID getArrayInterfaceJson = CheckJvmCall(jenv_->GetMethodID(
batch_class, "getArrayInterfaceJson", "()Ljava/lang/String;"), jenv_);
auto jinterface =
static_cast<jstring>(jenv_->CallObjectMethod(batch, getArrayInterfaceJson));
CheckJvmCall(jinterface, jenv_);
char const *c_interface_str =
CheckJvmCall(jenv_->GetStringUTFChars(jinterface, nullptr), jenv_);
StageData(c_interface_str);
jenv_->ReleaseStringUTFChars(jinterface, c_interface_str);
last_batch_ = batch;
return 1;
} else {
return 0;
}
}
void StageData(std::string interface_str) {
++n_batches_;
// DataFrame
using T = decltype(host_columns_)::value_type::element_type;
host_columns_.emplace_back(std::unique_ptr<T>(new T));
// Stage the meta info.
auto json_interface =
Json::Load({interface_str.c_str(), interface_str.size()});
CHECK(!IsA<Null>(json_interface));
StageMetaInfo(json_interface);
Json features = json_interface["features_str"];
auto json_columns = get<Array const>(features);
std::vector<ArrayInterface<1>> interfaces;
// Stage the data
for (auto &json_col : json_columns) {
auto column = ArrayInterface<1>(get<Object const>(json_col));
interfaces.emplace_back(column);
}
Json::Dump(features, &interface_str);
CopyInterface(interfaces, json_columns, cudaMemcpyDeviceToHost,
&host_columns_.back()->data, &host_columns_.back()->valid,
&host_columns_.back()->interfaces, copy_stream_);
XGProxyDMatrixSetDataCudaColumnar(proxy_, interface_str.c_str());
it_++;
}
int NextFirstLoop() {
try {
dh::safe_cuda(cudaStreamSynchronize(copy_stream_));
if (this->PullIterFromJVM()) {
return 1;
} else {
initialized_ = true;
return 0;
}
} catch (dmlc::Error const &e) {
if (jni_status_ == JNI_EDETACHED) {
GlobalJvm()->DetachCurrentThread();
}
LOG(FATAL) << e.what();
}
LOG(FATAL) << "Unreachable";
return 1;
}
int NextSecondLoop() {
std::string str;
// Meta
auto const &label = this->label_interfaces_.at(it_);
Json::Dump(label, &str);
XGDMatrixSetInfoFromInterface(proxy_, "label", str.c_str());
if (n_batches_ == this->weight_interfaces_.size()) {
auto const &weight = this->weight_interfaces_.at(it_);
Json::Dump(weight, &str);
XGDMatrixSetInfoFromInterface(proxy_, "weight", str.c_str());
}
if (n_batches_ == this->margin_interfaces_.size()) {
auto const &base_margin = this->margin_interfaces_.at(it_);
Json::Dump(base_margin, &str);
XGDMatrixSetInfoFromInterface(proxy_, "base_margin", str.c_str());
}
// Data
auto const &json_interface = host_columns_.at(it_)->interfaces;
std::vector<ArrayInterface<1>> in;
for (auto interface : json_interface) {
auto column = ArrayInterface<1>(get<Object const>(interface));
in.emplace_back(column);
}
std::vector<Json> out;
CopyInterface(in, json_interface, cudaMemcpyHostToDevice, &staging_data_,
&staging_mask_, &out, nullptr);
Json temp{Array(std::move(out))};
std::string interface_str;
Json::Dump(temp, &interface_str);
XGProxyDMatrixSetDataCudaColumnar(proxy_, interface_str.c_str());
it_++;
return 1;
}
int Next() {
if (!initialized_) {
return NextFirstLoop();
} else {
if (it_ == n_batches_) {
return 0;
}
return NextSecondLoop();
}
};
};
// C-style trampolines with the signatures required by the XGBoost callback
// API; forward to the DataIteratorProxy instance carried in `self`.
namespace {
void Reset(DataIterHandle self) {
static_cast<xgboost::jni::DataIteratorProxy *>(self)->Reset();
}
int Next(DataIterHandle self) {
return static_cast<xgboost::jni::DataIteratorProxy *>(self)->Next();
}
} // anonymous namespace
// JNI-facing entry point: builds a DeviceQuantileDMatrix by iterating `jiter`
// through the proxy, then stores the resulting handle into `jout`.
XGB_DLL jint XGDeviceQuantileDMatrixCreateFromCallbackImpl(JNIEnv *jenv, jclass jcls,
jobject jiter,
jfloat jmissing,
jint jmax_bin, jint jnthread,
jlongArray jout) {
xgboost::jni::DataIteratorProxy proxy(jiter);
DMatrixHandle result;
// NOTE(review): argument order (missing, nthread, max_bin) must match the
// C-API declaration -- confirm against xgboost/c_api.h.
auto ret = XGDeviceQuantileDMatrixCreateFromCallback(
&proxy, proxy.GetDMatrixHandle(), Reset, Next, jmissing, jnthread,
jmax_bin, &result);
setHandle(jenv, jout, result);
return ret;
}
} // namespace jni
} // namespace xgboost
|
0261271079973ef1354cd9cd8e10ce0f606cac92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <ctime>
#include <algorithm>
// Per-block inclusive prefix sum (Hillis-Steele) over d_in[0..n), writing the
// scanned values to d_out and each block's total into d_temp[blockIdx.x].
// Requires dynamic shared memory of blockDim.x * sizeof(int).
// Fix: the original read/wrote d_in[index] / d_out[index] without a bounds
// check, so any n that is not a multiple of blockDim.x caused out-of-range
// global accesses in the tail block.  Out-of-range lanes now contribute the
// additive identity (0), which leaves all in-range results unchanged.
__global__ void scan(int n, int *d_in, int *d_out, int *d_temp){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int id = threadIdx.x;
    int m = min(n, (int)blockDim.x);
    extern __shared__ int sdata[];
    // Tail-block guard: lanes past the end load 0 so they do not perturb sums.
    sdata[id] = (index < n) ? d_in[index] : 0;
    __syncthreads();
    for (int step = 1; step < m; step *= 2){
        int cur = sdata[id];
        int ileft = id - step;
        if (ileft >= 0){
            cur += sdata[ileft];
        }
        // Two barriers: all reads of the previous generation must finish
        // before any lane overwrites its slot.
        __syncthreads();
        sdata[id] = cur;
        __syncthreads();
    }
    if (index < n){
        d_out[index] = sdata[id];
    }
    // Last active lane publishes the block total for the recursive pass.
    if (id == m-1){
        d_temp[blockIdx.x] = sdata[id];
    }
}
// Adds the exclusive per-block offset (scanned block totals in d_temp) back
// into each element, completing the multi-block scan.  Block 0 has no offset.
// Fix: added a tail-block bounds guard; the original wrote d_out[index] even
// for index >= n when n was not a multiple of blockDim.x.
__global__ void add(int n, int *d_in, int *d_out, int *d_temp){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n){
        return;  // tail-block guard
    }
    int id = blockIdx.x;
    if (id == 0){
        d_out[index] = d_in[index];
    } else {
        d_out[index] = d_in[index] + d_temp[id-1];
    }
}
// Host driver for a full inclusive scan of n ints: per-block scan, recursive
// scan of the block totals, then a fix-up pass folding offsets back in.
// Works in place (h_in may alias h_out is not required; temp may alias both
// arguments of the recursive call, which the kernel tolerates because it
// stages input in shared memory before writing output).
void scan(int n, int *h_in, int *h_out){
    const int threads = min(n, 512);
    const int blocks = (n + threads - 1) / threads;
    int *blockTotals;
    hipMallocManaged(&blockTotals, blocks*sizeof(int));
    // Phase 1: each block scans its slice and records its total.
    hipLaunchKernelGGL(( scan), dim3(blocks), dim3(threads), threads*sizeof(int), 0, n, h_in, h_out, blockTotals);
    hipDeviceSynchronize();
    // Phase 2: scan the block totals so they become per-block offsets.
    if (blocks > 1){
        scan(blocks, blockTotals, blockTotals);
    }
    // Phase 3: add each block's offset to its elements.
    hipLaunchKernelGGL(( add), dim3(blocks), dim3(threads), 0, 0, n, h_out, h_out, blockTotals);
    hipDeviceSynchronize();
    hipFree(blockTotals);
}
// Demo/benchmark driver: runs the GPU scan on a small (2^6) and a large
// (2^26) array, computes a sequential CPU reference, prints spot checks and
// wall-clock timings.
// Fix: the CPU reference arrays were allocated with new[] but never freed;
// they are now released before exit.  (HIP API return codes are still
// unchecked -- acceptable for a demo.)
int main(){
    std::clock_t startGPU, endGPU;
    std::clock_t startCPU, endCPU;
    int smallN = 1<<6;
    int largeN = 1<<26;
    // GPU version
    int *x;
    hipMallocManaged(&x, smallN*sizeof(int));
    int *xout;
    hipMallocManaged(&xout, smallN*sizeof(int));
    int *y;
    hipMallocManaged(&y, largeN*sizeof(int));
    int *yout;
    hipMallocManaged(&yout, largeN*sizeof(int));
    for (int i=0; i<smallN; i++){
        x[i] = (i+1);
    }
    for (int i=0; i<largeN; i++){
        y[i] = 10;
    }
    startGPU = std::clock();
    scan(smallN, x, xout);
    scan(largeN, y, yout);
    endGPU = std::clock();
    // CPU version: sequential inclusive scan as the reference.
    startCPU = std::clock();
    int *smallArr = new int[1<<6];
    int *largeArr = new int[1<<26];
    for (int i=0; i<smallN; i++){
        smallArr[i] = (i+1);
        if (i > 0) smallArr[i] += smallArr[i-1];
    }
    for (int i=0; i<largeN; i++){
        largeArr[i] = 10;
        if (i > 0) largeArr[i] += largeArr[i-1];
    }
    endCPU = std::clock();
    // compare result:
    for (int i=0; i<smallN; i++){
        std::cout << x[i] << ": " << xout[i] << " " << smallArr[i] << std::endl;
    }
    for (int i=1; i<largeN; i*=10){
        std::cout << i << ": " << yout[i] << " " << largeArr[i] << std::endl;
    }
    std::cout << "Scan: " << yout[largeN-1] << " " << largeArr[largeN-1] << std::endl;
    // time
    std::cout << "GPU Time: " << ((endGPU - startGPU) / (double) CLOCKS_PER_SEC) << std::endl;
    std::cout << "CPU Time: " << ((endCPU - startCPU) / (double) CLOCKS_PER_SEC) << std::endl;
    // Fix: release the CPU reference arrays (previously leaked).
    delete[] smallArr;
    delete[] largeArr;
    hipFree(x);
    hipFree(xout);
    hipFree(y);
    hipFree(yout);
    return 0;
}
| 0261271079973ef1354cd9cd8e10ce0f606cac92.cu | #include <iostream>
#include <math.h>
#include <ctime>
#include <algorithm>
// Per-block inclusive prefix sum (Hillis-Steele) over d_in[0..n), writing the
// scanned values to d_out and each block's total into d_temp[blockIdx.x].
// Requires dynamic shared memory of blockDim.x * sizeof(int).
// Fix: the original accessed d_in[index] / d_out[index] without a bounds
// check, so any n not a multiple of blockDim.x caused out-of-range global
// accesses in the tail block.  Out-of-range lanes now contribute 0 (the
// additive identity), leaving all in-range results unchanged.
__global__ void scan(int n, int *d_in, int *d_out, int *d_temp){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int id = threadIdx.x;
    int m = min(n, (int)blockDim.x);
    extern __shared__ int sdata[];
    // Tail-block guard: lanes past the end load 0 so they do not perturb sums.
    sdata[id] = (index < n) ? d_in[index] : 0;
    __syncthreads();
    for (int step = 1; step < m; step *= 2){
        int cur = sdata[id];
        int ileft = id - step;
        if (ileft >= 0){
            cur += sdata[ileft];
        }
        // Two barriers: all reads of the previous generation must finish
        // before any lane overwrites its slot.
        __syncthreads();
        sdata[id] = cur;
        __syncthreads();
    }
    if (index < n){
        d_out[index] = sdata[id];
    }
    // Last active lane publishes the block total for the recursive pass.
    if (id == m-1){
        d_temp[blockIdx.x] = sdata[id];
    }
}
// Adds the exclusive per-block offset (scanned block totals in d_temp) back
// into each element, completing the multi-block scan.  Block 0 has no offset.
// Fix: added a tail-block bounds guard; the original wrote d_out[index] even
// for index >= n when n was not a multiple of blockDim.x.
__global__ void add(int n, int *d_in, int *d_out, int *d_temp){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n){
        return;  // tail-block guard
    }
    int id = blockIdx.x;
    if (id == 0){
        d_out[index] = d_in[index];
    } else {
        d_out[index] = d_in[index] + d_temp[id-1];
    }
}
// Host driver for a full inclusive scan of n ints: per-block scan, recursive
// scan of the block totals, then a fix-up pass folding offsets back in.
// The recursive call passes the totals buffer as both input and output; the
// kernel tolerates aliasing because it stages input in shared memory first.
void scan(int n, int *h_in, int *h_out){
    const int threads = min(n, 512);
    const int blocks = (n + threads - 1) / threads;
    int *blockTotals;
    cudaMallocManaged(&blockTotals, blocks*sizeof(int));
    // Phase 1: each block scans its slice and records its total.
    scan<<<blocks, threads, threads*sizeof(int)>>>(n, h_in, h_out, blockTotals);
    cudaDeviceSynchronize();
    // Phase 2: scan the block totals so they become per-block offsets.
    if (blocks > 1){
        scan(blocks, blockTotals, blockTotals);
    }
    // Phase 3: add each block's offset to its elements.
    add<<<blocks, threads>>>(n, h_out, h_out, blockTotals);
    cudaDeviceSynchronize();
    cudaFree(blockTotals);
}
// Demo/benchmark driver: runs the GPU scan on a small (2^6) and a large
// (2^26) array, computes a sequential CPU reference, prints spot checks and
// wall-clock timings.
// Fix: the CPU reference arrays were allocated with new[] but never freed;
// they are now released before exit.  (CUDA API return codes are still
// unchecked -- acceptable for a demo.)
int main(){
    std::clock_t startGPU, endGPU;
    std::clock_t startCPU, endCPU;
    int smallN = 1<<6;
    int largeN = 1<<26;
    // GPU version
    int *x;
    cudaMallocManaged(&x, smallN*sizeof(int));
    int *xout;
    cudaMallocManaged(&xout, smallN*sizeof(int));
    int *y;
    cudaMallocManaged(&y, largeN*sizeof(int));
    int *yout;
    cudaMallocManaged(&yout, largeN*sizeof(int));
    for (int i=0; i<smallN; i++){
        x[i] = (i+1);
    }
    for (int i=0; i<largeN; i++){
        y[i] = 10;
    }
    startGPU = std::clock();
    scan(smallN, x, xout);
    scan(largeN, y, yout);
    endGPU = std::clock();
    // CPU version: sequential inclusive scan as the reference.
    startCPU = std::clock();
    int *smallArr = new int[1<<6];
    int *largeArr = new int[1<<26];
    for (int i=0; i<smallN; i++){
        smallArr[i] = (i+1);
        if (i > 0) smallArr[i] += smallArr[i-1];
    }
    for (int i=0; i<largeN; i++){
        largeArr[i] = 10;
        if (i > 0) largeArr[i] += largeArr[i-1];
    }
    endCPU = std::clock();
    // compare result:
    for (int i=0; i<smallN; i++){
        std::cout << x[i] << ": " << xout[i] << " " << smallArr[i] << std::endl;
    }
    for (int i=1; i<largeN; i*=10){
        std::cout << i << ": " << yout[i] << " " << largeArr[i] << std::endl;
    }
    std::cout << "Scan: " << yout[largeN-1] << " " << largeArr[largeN-1] << std::endl;
    // time
    std::cout << "GPU Time: " << ((endGPU - startGPU) / (double) CLOCKS_PER_SEC) << std::endl;
    std::cout << "CPU Time: " << ((endCPU - startCPU) / (double) CLOCKS_PER_SEC) << std::endl;
    // Fix: release the CPU reference arrays (previously leaked).
    delete[] smallArr;
    delete[] largeArr;
    cudaFree(x);
    cudaFree(xout);
    cudaFree(y);
    cudaFree(yout);
    return 0;
}
|
4854295de95f9c0d7b5b8cb903e451773f7f3d68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <ncs/cuda/CUDA.h>
#include <ncs/cuda/FireTableUpdater.cuh>
#include <ncs/sim/CUDA.h>
namespace ncs {
namespace sim {
namespace cuda {
// For every synapse whose presynaptic neuron fired this step, sets the
// synapse's bit in the fire table row `row + delay` (wrapping modulo
// num_rows), scheduling the event `delay` steps in the future.
// Grid-stride loop: safe for any launch configuration.
__global__ void updateTableKernel(ncs::sim::Bit::Word* neuron_fire_vector,
ncs::sim::Bit::Word* synapse_fire_table,
unsigned int synaptic_vector_size,
unsigned int row,
unsigned int num_rows,
unsigned int* presynaptic_neuron_ids,
unsigned int* synaptic_delays,
unsigned int num_synapses) {
unsigned int index = grid::thread();
unsigned int stride = grid::stride();
for (; index < num_synapses; index += stride) {
unsigned int pre_id = presynaptic_neuron_ids[index];
// 0xFFFFFFFF marks a synapse with no presynaptic neuron; skip it.
if (pre_id == 0xFFFFFFFF) {
continue;
}
unsigned int word = bit::word(pre_id);
unsigned int mask = bit::mask(pre_id);
if (neuron_fire_vector[word] & mask) {
unsigned int delay = synaptic_delays[index];
// Circular row index into the delay table.
// NOTE(review): assumes delay < num_rows; larger delays would need a
// modulo -- confirm upstream clamping.
unsigned int event_row = row + delay;
if (event_row >= num_rows) {
event_row -= num_rows;
}
unsigned int event_mask = bit::mask(index);
unsigned int* event_word =
synapse_fire_table + event_row * synaptic_vector_size;
// atomicOr: multiple synapses may share a word in the same row.
atomicOr(event_word + bit::word(index), event_mask);
}
}
}
// Host wrapper: launches updateTableKernel on the framework-managed stream
// with a launch configuration sized for num_synapses, then blocks until the
// kernel has finished (CUDA::synchronize).
void updateFireTable( Bit::Word* neuron_fire_vector,
Bit::Word* synapse_fire_table,
unsigned int synaptic_vector_size,
unsigned int row,
unsigned int num_rows,
unsigned int* presynaptic_neuron_ids,
unsigned int* synaptic_delays,
unsigned int num_synapses) {
hipLaunchKernelGGL(( updateTableKernel), dim3(CUDA::getNumberOfBlocks(num_synapses)),
dim3(CUDA::getThreadsPerBlock(num_synapses)),
0,
CUDA::getStream(), neuron_fire_vector,
synapse_fire_table,
synaptic_vector_size,
row,
num_rows,
presynaptic_neuron_ids,
synaptic_delays,
num_synapses);
CUDA::synchronize();
}
} // namespace cuda
} // namespace sim
} // namespace ncs
| 4854295de95f9c0d7b5b8cb903e451773f7f3d68.cu | #include <stdio.h>
#include <ncs/cuda/CUDA.h>
#include <ncs/cuda/FireTableUpdater.cuh>
#include <ncs/sim/CUDA.h>
namespace ncs {
namespace sim {
namespace cuda {
// For every synapse whose presynaptic neuron fired this step, sets the
// synapse's bit in fire-table row `row + delay` (wrapping modulo num_rows),
// scheduling the event `delay` steps in the future.  Grid-stride loop.
__global__ void updateTableKernel(ncs::sim::Bit::Word* neuron_fire_vector,
ncs::sim::Bit::Word* synapse_fire_table,
unsigned int synaptic_vector_size,
unsigned int row,
unsigned int num_rows,
unsigned int* presynaptic_neuron_ids,
unsigned int* synaptic_delays,
unsigned int num_synapses) {
unsigned int index = grid::thread();
unsigned int stride = grid::stride();
for (; index < num_synapses; index += stride) {
unsigned int pre_id = presynaptic_neuron_ids[index];
// 0xFFFFFFFF marks a synapse with no presynaptic neuron; skip it.
if (pre_id == 0xFFFFFFFF) {
continue;
}
unsigned int word = bit::word(pre_id);
unsigned int mask = bit::mask(pre_id);
if (neuron_fire_vector[word] & mask) {
unsigned int delay = synaptic_delays[index];
// Circular row index; NOTE(review): assumes delay < num_rows.
unsigned int event_row = row + delay;
if (event_row >= num_rows) {
event_row -= num_rows;
}
unsigned int event_mask = bit::mask(index);
unsigned int* event_word =
synapse_fire_table + event_row * synaptic_vector_size;
// atomicOr: multiple synapses may share a word in the same row.
atomicOr(event_word + bit::word(index), event_mask);
}
}
}
// Host wrapper: launches updateTableKernel on the framework-managed stream
// with a configuration sized for num_synapses, then blocks until completion.
void updateFireTable( Bit::Word* neuron_fire_vector,
Bit::Word* synapse_fire_table,
unsigned int synaptic_vector_size,
unsigned int row,
unsigned int num_rows,
unsigned int* presynaptic_neuron_ids,
unsigned int* synaptic_delays,
unsigned int num_synapses) {
updateTableKernel<<<CUDA::getNumberOfBlocks(num_synapses),
CUDA::getThreadsPerBlock(num_synapses),
0,
CUDA::getStream()>>>(neuron_fire_vector,
synapse_fire_table,
synaptic_vector_size,
row,
num_rows,
presynaptic_neuron_ids,
synaptic_delays,
num_synapses);
CUDA::synchronize();
}
} // namespace cuda
} // namespace sim
} // namespace ncs
|
aadabb5d9278824977830a9df80b0a3a966370bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nodes/equal.h"
__global__
void EqualKernel(int n, const float * a, const float * b, float * c)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
if (a[i] == 0 || b[i] == 0)
printf("%f %f\n", a[i], b[i]);
c[i] = fabs(a[i] - b[i]) < 0.0000000000000001f ? 1.0 : 0.0;
}
}
Equal::Equal(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_equal_param() == false) << "param.has_equal_param() == false";
}
void Equal::init() {
LOG_IF(FATAL, _inputs[0]->value()->size() != _inputs[0]->value()->size()) << "Size mismatch [FAILED]";
_outputs[0]->initValue(_inputs[0]->value()->dims());
}
void Equal::forward() {
auto size = _inputs[0]->value()->size();
EqualKernel << < numOfBlocks(size), maxThreadsPerBlock >> >(size, _inputs[0]->value()->gpu_data(), _inputs[1]->value()->gpu_data(), _outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
void Equal::backward() {
}
std::string Equal::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.equal(" + _input_name_for_cpp(0) + ", " + _input_name_for_cpp(1) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
}
| aadabb5d9278824977830a9df80b0a3a966370bb.cu | #include "nodes/equal.h"
__global__
void EqualKernel(int n, const float * a, const float * b, float * c)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
if (a[i] == 0 || b[i] == 0)
printf("%f %f\n", a[i], b[i]);
c[i] = fabs(a[i] - b[i]) < 0.0000000000000001f ? 1.0 : 0.0;
}
}
Equal::Equal(deepflow::NodeParam *param) : Node(param) {
LOG_IF(FATAL, param->has_equal_param() == false) << "param.has_equal_param() == false";
}
void Equal::init() {
LOG_IF(FATAL, _inputs[0]->value()->size() != _inputs[0]->value()->size()) << "Size mismatch [FAILED]";
_outputs[0]->initValue(_inputs[0]->value()->dims());
}
void Equal::forward() {
auto size = _inputs[0]->value()->size();
EqualKernel << < numOfBlocks(size), maxThreadsPerBlock >> >(size, _inputs[0]->value()->gpu_data(), _inputs[1]->value()->gpu_data(), _outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
void Equal::backward() {
}
std::string Equal::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.equal(" + _input_name_for_cpp(0) + ", " + _input_name_for_cpp(1) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
}
|
fcede388e37f69c17117f9ea6fb3cc216a3dd22b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixTransposeSqr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *P = NULL;
hipMalloc(&P, XSIZE*YSIZE);
double *M = NULL;
hipMalloc(&M, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixTransposeSqr), dim3(gridBlock),dim3(threadBlock), 0, 0, P,M,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixTransposeSqr), dim3(gridBlock),dim3(threadBlock), 0, 0, P,M,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixTransposeSqr), dim3(gridBlock),dim3(threadBlock), 0, 0, P,M,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fcede388e37f69c17117f9ea6fb3cc216a3dd22b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixTransposeSqr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *P = NULL;
cudaMalloc(&P, XSIZE*YSIZE);
double *M = NULL;
cudaMalloc(&M, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixTransposeSqr<<<gridBlock,threadBlock>>>(P,M,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixTransposeSqr<<<gridBlock,threadBlock>>>(P,M,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixTransposeSqr<<<gridBlock,threadBlock>>>(P,M,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c6218a687509444f29149cbc678ff1e403c56c6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" >
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" >
<meta name="ROBOTS" content="NOARCHIVE">
<link rel="icon" type="image/vnd.microsoft.icon" href="https://ssl.gstatic.com/codesite/ph/images/phosting.ico">
<script type="text/javascript">
var codesite_token = "MBDwIWyWnVeH7W7374m1cEh0iI0:1371808304867";
var CS_env = {"domainName":null,"assetHostPath":"https://ssl.gstatic.com/codesite/ph","profileUrl":"/u/116699586124044253698/","token":"MBDwIWyWnVeH7W7374m1cEh0iI0:1371808304867","relativeBaseUrl":"","projectName":"stanford-cs193g-sp2010","loggedInUserEmail":"gaurav.sachin007@gmail.com","assetVersionPath":"https://ssl.gstatic.com/codesite/ph/18376132045800511552","projectHomeUrl":"/p/stanford-cs193g-sp2010"};
var _gaq = _gaq || [];
_gaq.push(
['siteTracker._setAccount', 'UA-18071-1'],
['siteTracker._trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(ga);
})();
</script>
<title>block_scan.cu -
stanford-cs193g-sp2010 -
Programming Massively Parallel Processors with CUDA - Google Project Hosting
</title>
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/core.css">
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/ph_detail.css" >
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/d_sb.css" >
<!--[if IE]>
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/d_ie.css" >
<![endif]-->
<style type="text/css">
.menuIcon.off { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -42px }
.menuIcon.on { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -28px }
.menuIcon.down { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 0; }
tr.inline_comment {
background: #fff;
vertical-align: top;
}
div.draft, div.published {
padding: .3em;
border: 1px solid #999;
margin-bottom: .1em;
font-family: arial, sans-serif;
max-width: 60em;
}
div.draft {
background: #ffa;
}
div.published {
background: #e5ecf9;
}
div.published .body, div.draft .body {
padding: .5em .1em .1em .1em;
max-width: 60em;
white-space: pre-wrap;
white-space: -moz-pre-wrap;
white-space: -pre-wrap;
white-space: -o-pre-wrap;
word-wrap: break-word;
font-size: 1em;
}
div.draft .actions {
margin-left: 1em;
font-size: 90%;
}
div.draft form {
padding: .5em .5em .5em 0;
}
div.draft textarea, div.published textarea {
width: 95%;
height: 10em;
font-family: arial, sans-serif;
margin-bottom: .5em;
}
.nocursor, .nocursor td, .cursor_hidden, .cursor_hidden td {
background-color: white;
height: 2px;
}
.cursor, .cursor td {
background-color: darkblue;
height: 2px;
display: '';
}
.list {
border: 1px solid white;
border-bottom: 0;
}
</style>
</head>
<body class="t4">
<script type="text/javascript">
window.___gcfg = {lang: 'en'};
(function()
{var po = document.createElement("script");
po.type = "text/javascript"; po.async = true;po.src = "https://apis.google.com/js/plusone.js";
var s = document.getElementsByTagName("script")[0];
s.parentNode.insertBefore(po, s);
})();
</script>
<div class="headbg">
<div id="gaia">
<span>
<a href="#" id="multilogin-dropdown" onclick="return false;"
><u><b>gaurav.sachin007@gmail.com</b></u> <small>▼</small></a>
| <a href="/u/116699586124044253698/" id="projects-dropdown" onclick="return false;"
><u>My favorites</u> <small>▼</small></a>
| <a href="/u/116699586124044253698/" onclick="_CS_click('/gb/ph/profile');"
title="Profile, Updates, and Settings"
><u>Profile</u></a>
| <a href="https://www.google.com/accounts/Logout?continue=https%3A%2F%2Fcode.google.com%2Fp%2Fstanford-cs193g-sp2010%2Fsource%2Fbrowse%2Ftrunk%2Ftutorials%2Fblock_scan.cu"
onclick="_CS_click('/gb/ph/signout');"
><u>Sign out</u></a>
</span>
</div>
<div class="gbh" style="left: 0pt;"></div>
<div class="gbh" style="right: 0pt;"></div>
<div style="height: 1px"></div>
<!--[if lte IE 7]>
<div style="text-align:center;">
Your version of Internet Explorer is not supported. Try a browser that
contributes to open source, such as <a href="http://www.firefox.com">Firefox</a>,
<a href="http://www.google.com/chrome">Google Chrome</a>, or
<a href="http://code.google.com/chrome/chromeframe/">Google Chrome Frame</a>.
</div>
<![endif]-->
<table style="padding:0px; margin: 0px 0px 10px 0px; width:100%" cellpadding="0" cellspacing="0"
itemscope itemtype="http://schema.org/CreativeWork">
<tr style="height: 58px;">
<td id="plogo">
<link itemprop="url" href="/p/stanford-cs193g-sp2010">
<a href="/p/stanford-cs193g-sp2010/">
<img src="https://ssl.gstatic.com/codesite/ph/images/defaultlogo.png" alt="Logo" itemprop="image">
</a>
</td>
<td style="padding-left: 0.5em">
<div id="pname">
<a href="/p/stanford-cs193g-sp2010/"><span itemprop="name">stanford-cs193g-sp2010</span></a>
</div>
<div id="psum">
<a id="project_summary_link"
href="/p/stanford-cs193g-sp2010/"><span itemprop="description">Programming Massively Parallel Processors with CUDA</span></a>
</div>
</td>
<td style="white-space:nowrap;text-align:right; vertical-align:bottom;">
<form action="/hosting/search">
<input size="30" name="q" value="" type="text">
<input type="submit" name="projectsearch" value="Search projects" >
</form>
</tr>
</table>
</div>
<div id="mt" class="gtb">
<a href="/p/stanford-cs193g-sp2010/" class="tab ">Project Home</a>
<a href="/p/stanford-cs193g-sp2010/w/list" class="tab ">Wiki</a>
<a href="/p/stanford-cs193g-sp2010/issues/list"
class="tab ">Issues</a>
<a href="/p/stanford-cs193g-sp2010/source/checkout"
class="tab active">Source</a>
<div class=gtbc></div>
</div>
<table cellspacing="0" cellpadding="0" width="100%" align="center" border="0" class="st">
<tr>
<td class="subt">
<div class="st2">
<div class="isf">
<span class="inst1"><a href="/p/stanford-cs193g-sp2010/source/checkout">Checkout</a></span>
<span class="inst2"><a href="/p/stanford-cs193g-sp2010/source/browse/">Browse</a></span>
<span class="inst3"><a href="/p/stanford-cs193g-sp2010/source/list">Changes</a></span>
</form>
<script type="text/javascript">
function codesearchQuery(form) {
var query = document.getElementById('q').value;
if (query) { form.action += '%20' + query; }
}
</script>
</div>
</div>
</td>
<td align="right" valign="top" class="bevel-right"></td>
</tr>
</table>
<script type="text/javascript">
var cancelBubble = false;
function _go(url) { document.location = url; }
</script>
<div id="maincol"
>
<div class="expand">
<div id="colcontrol">
<style type="text/css">
#file_flipper { white-space: nowrap; padding-right: 2em; }
#file_flipper.hidden { display: none; }
#file_flipper .pagelink { color: #0000CC; text-decoration: underline; }
#file_flipper #visiblefiles { padding-left: 0.5em; padding-right: 0.5em; }
</style>
<table id="nav_and_rev" class="list"
cellpadding="0" cellspacing="0" width="100%">
<tr>
<td nowrap="nowrap" class="src_crumbs src_nav" width="33%">
<strong class="src_nav">Source path: </strong>
<span id="crumb_root">
<a href="/p/stanford-cs193g-sp2010/source/browse/">svn</a>/ </span>
<span id="crumb_links" class="ifClosed"><a href="/p/stanford-cs193g-sp2010/source/browse/trunk/">trunk</a><span class="sp">/ </span><a href="/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/">tutorials</a><span class="sp">/ </span>block_scan.cu</span>
</td>
<td nowrap="nowrap" width="33%" align="center">
<a href="/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/block_scan.cu?edit=1"
><img src="https://ssl.gstatic.com/codesite/ph/images/pencil-y14.png"
class="edit_icon">Edit file</a>
</td>
<td nowrap="nowrap" width="33%" align="right">
<table cellpadding="0" cellspacing="0" style="font-size: 100%"><tr>
<td class="flipper"><b>r285</b></td>
</tr></table>
</td>
</tr>
</table>
<div class="fc">
<style type="text/css">
.undermouse span {
background-image: url(https://ssl.gstatic.com/codesite/ph/images/comments.gif); }
</style>
<table class="opened" id="review_comment_area"
><tr>
<td id="nums">
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
<pre><table width="100%" id="nums_table_0"><tr id="gr_svn285_1"
><td id="1"><a href="#1">1</a></td></tr
><tr id="gr_svn285_2"
><td id="2"><a href="#2">2</a></td></tr
><tr id="gr_svn285_3"
><td id="3"><a href="#3">3</a></td></tr
><tr id="gr_svn285_4"
><td id="4"><a href="#4">4</a></td></tr
><tr id="gr_svn285_5"
><td id="5"><a href="#5">5</a></td></tr
><tr id="gr_svn285_6"
><td id="6"><a href="#6">6</a></td></tr
><tr id="gr_svn285_7"
><td id="7"><a href="#7">7</a></td></tr
><tr id="gr_svn285_8"
><td id="8"><a href="#8">8</a></td></tr
><tr id="gr_svn285_9"
><td id="9"><a href="#9">9</a></td></tr
><tr id="gr_svn285_10"
><td id="10"><a href="#10">10</a></td></tr
><tr id="gr_svn285_11"
><td id="11"><a href="#11">11</a></td></tr
><tr id="gr_svn285_12"
><td id="12"><a href="#12">12</a></td></tr
><tr id="gr_svn285_13"
><td id="13"><a href="#13">13</a></td></tr
><tr id="gr_svn285_14"
><td id="14"><a href="#14">14</a></td></tr
><tr id="gr_svn285_15"
><td id="15"><a href="#15">15</a></td></tr
><tr id="gr_svn285_16"
><td id="16"><a href="#16">16</a></td></tr
><tr id="gr_svn285_17"
><td id="17"><a href="#17">17</a></td></tr
><tr id="gr_svn285_18"
><td id="18"><a href="#18">18</a></td></tr
><tr id="gr_svn285_19"
><td id="19"><a href="#19">19</a></td></tr
><tr id="gr_svn285_20"
><td id="20"><a href="#20">20</a></td></tr
><tr id="gr_svn285_21"
><td id="21"><a href="#21">21</a></td></tr
><tr id="gr_svn285_22"
><td id="22"><a href="#22">22</a></td></tr
><tr id="gr_svn285_23"
><td id="23"><a href="#23">23</a></td></tr
><tr id="gr_svn285_24"
><td id="24"><a href="#24">24</a></td></tr
><tr id="gr_svn285_25"
><td id="25"><a href="#25">25</a></td></tr
><tr id="gr_svn285_26"
><td id="26"><a href="#26">26</a></td></tr
><tr id="gr_svn285_27"
><td id="27"><a href="#27">27</a></td></tr
><tr id="gr_svn285_28"
><td id="28"><a href="#28">28</a></td></tr
><tr id="gr_svn285_29"
><td id="29"><a href="#29">29</a></td></tr
><tr id="gr_svn285_30"
><td id="30"><a href="#30">30</a></td></tr
><tr id="gr_svn285_31"
><td id="31"><a href="#31">31</a></td></tr
><tr id="gr_svn285_32"
><td id="32"><a href="#32">32</a></td></tr
><tr id="gr_svn285_33"
><td id="33"><a href="#33">33</a></td></tr
><tr id="gr_svn285_34"
><td id="34"><a href="#34">34</a></td></tr
><tr id="gr_svn285_35"
><td id="35"><a href="#35">35</a></td></tr
><tr id="gr_svn285_36"
><td id="36"><a href="#36">36</a></td></tr
><tr id="gr_svn285_37"
><td id="37"><a href="#37">37</a></td></tr
><tr id="gr_svn285_38"
><td id="38"><a href="#38">38</a></td></tr
><tr id="gr_svn285_39"
><td id="39"><a href="#39">39</a></td></tr
><tr id="gr_svn285_40"
><td id="40"><a href="#40">40</a></td></tr
><tr id="gr_svn285_41"
><td id="41"><a href="#41">41</a></td></tr
><tr id="gr_svn285_42"
><td id="42"><a href="#42">42</a></td></tr
><tr id="gr_svn285_43"
><td id="43"><a href="#43">43</a></td></tr
><tr id="gr_svn285_44"
><td id="44"><a href="#44">44</a></td></tr
><tr id="gr_svn285_45"
><td id="45"><a href="#45">45</a></td></tr
><tr id="gr_svn285_46"
><td id="46"><a href="#46">46</a></td></tr
><tr id="gr_svn285_47"
><td id="47"><a href="#47">47</a></td></tr
><tr id="gr_svn285_48"
><td id="48"><a href="#48">48</a></td></tr
><tr id="gr_svn285_49"
><td id="49"><a href="#49">49</a></td></tr
><tr id="gr_svn285_50"
><td id="50"><a href="#50">50</a></td></tr
><tr id="gr_svn285_51"
><td id="51"><a href="#51">51</a></td></tr
><tr id="gr_svn285_52"
><td id="52"><a href="#52">52</a></td></tr
><tr id="gr_svn285_53"
><td id="53"><a href="#53">53</a></td></tr
><tr id="gr_svn285_54"
><td id="54"><a href="#54">54</a></td></tr
><tr id="gr_svn285_55"
><td id="55"><a href="#55">55</a></td></tr
><tr id="gr_svn285_56"
><td id="56"><a href="#56">56</a></td></tr
><tr id="gr_svn285_57"
><td id="57"><a href="#57">57</a></td></tr
><tr id="gr_svn285_58"
><td id="58"><a href="#58">58</a></td></tr
><tr id="gr_svn285_59"
><td id="59"><a href="#59">59</a></td></tr
><tr id="gr_svn285_60"
><td id="60"><a href="#60">60</a></td></tr
><tr id="gr_svn285_61"
><td id="61"><a href="#61">61</a></td></tr
><tr id="gr_svn285_62"
><td id="62"><a href="#62">62</a></td></tr
><tr id="gr_svn285_63"
><td id="63"><a href="#63">63</a></td></tr
><tr id="gr_svn285_64"
><td id="64"><a href="#64">64</a></td></tr
><tr id="gr_svn285_65"
><td id="65"><a href="#65">65</a></td></tr
><tr id="gr_svn285_66"
><td id="66"><a href="#66">66</a></td></tr
><tr id="gr_svn285_67"
><td id="67"><a href="#67">67</a></td></tr
><tr id="gr_svn285_68"
><td id="68"><a href="#68">68</a></td></tr
><tr id="gr_svn285_69"
><td id="69"><a href="#69">69</a></td></tr
><tr id="gr_svn285_70"
><td id="70"><a href="#70">70</a></td></tr
><tr id="gr_svn285_71"
><td id="71"><a href="#71">71</a></td></tr
><tr id="gr_svn285_72"
><td id="72"><a href="#72">72</a></td></tr
><tr id="gr_svn285_73"
><td id="73"><a href="#73">73</a></td></tr
><tr id="gr_svn285_74"
><td id="74"><a href="#74">74</a></td></tr
><tr id="gr_svn285_75"
><td id="75"><a href="#75">75</a></td></tr
><tr id="gr_svn285_76"
><td id="76"><a href="#76">76</a></td></tr
><tr id="gr_svn285_77"
><td id="77"><a href="#77">77</a></td></tr
><tr id="gr_svn285_78"
><td id="78"><a href="#78">78</a></td></tr
><tr id="gr_svn285_79"
><td id="79"><a href="#79">79</a></td></tr
><tr id="gr_svn285_80"
><td id="80"><a href="#80">80</a></td></tr
><tr id="gr_svn285_81"
><td id="81"><a href="#81">81</a></td></tr
><tr id="gr_svn285_82"
><td id="82"><a href="#82">82</a></td></tr
><tr id="gr_svn285_83"
><td id="83"><a href="#83">83</a></td></tr
><tr id="gr_svn285_84"
><td id="84"><a href="#84">84</a></td></tr
><tr id="gr_svn285_85"
><td id="85"><a href="#85">85</a></td></tr
><tr id="gr_svn285_86"
><td id="86"><a href="#86">86</a></td></tr
><tr id="gr_svn285_87"
><td id="87"><a href="#87">87</a></td></tr
><tr id="gr_svn285_88"
><td id="88"><a href="#88">88</a></td></tr
><tr id="gr_svn285_89"
><td id="89"><a href="#89">89</a></td></tr
><tr id="gr_svn285_90"
><td id="90"><a href="#90">90</a></td></tr
><tr id="gr_svn285_91"
><td id="91"><a href="#91">91</a></td></tr
><tr id="gr_svn285_92"
><td id="92"><a href="#92">92</a></td></tr
><tr id="gr_svn285_93"
><td id="93"><a href="#93">93</a></td></tr
><tr id="gr_svn285_94"
><td id="94"><a href="#94">94</a></td></tr
><tr id="gr_svn285_95"
><td id="95"><a href="#95">95</a></td></tr
><tr id="gr_svn285_96"
><td id="96"><a href="#96">96</a></td></tr
><tr id="gr_svn285_97"
><td id="97"><a href="#97">97</a></td></tr
></table></pre>
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
</td>
<td id="lines">
<pre><table width="100%"><tr class="cursor_stop cursor_hidden"><td></td></tr></table></pre>
<pre ><table id="src_table_0"><tr
id=sl_svn285_1
><td class="source">// This example demonstrates a block-wise inclusive<br></td></tr
><tr
id=sl_svn285_2
><td class="source">// parallel prefix sum (scan) algorithm.<br></td></tr
><tr
id=sl_svn285_3
><td class="source"><br></td></tr
><tr
id=sl_svn285_4
><td class="source">#include <stdlib.h><br></td></tr
><tr
id=sl_svn285_5
><td class="source">#include <stdio.h><br></td></tr
><tr
id=sl_svn285_6
><td class="source">#include <vector><br></td></tr
><tr
id=sl_svn285_7
><td class="source">#include <iostream><br></td></tr
><tr
id=sl_svn285_8
><td class="source"><br></td></tr
><tr
id=sl_svn285_9
><td class="source"><br></td></tr
><tr
id=sl_svn285_10
><td class="source">// This kernel computes, per-block, a block-sized scan<br></td></tr
><tr
id=sl_svn285_11
><td class="source">// of the input. It assumes that the block size evenly<br></td></tr
><tr
id=sl_svn285_12
><td class="source">// divides the input size<br></td></tr
><tr
id=sl_svn285_13
><td class="source">__global__ void inclusive_scan(const unsigned int *input,<br></td></tr
><tr
id=sl_svn285_14
><td class="source"> unsigned int *result)<br></td></tr
><tr
id=sl_svn285_15
><td class="source">{<br></td></tr
><tr
id=sl_svn285_16
><td class="source"> extern __shared__ unsigned int sdata[];<br></td></tr
><tr
id=sl_svn285_17
><td class="source"><br></td></tr
><tr
id=sl_svn285_18
><td class="source"> unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;<br></td></tr
><tr
id=sl_svn285_19
><td class="source"><br></td></tr
><tr
id=sl_svn285_20
><td class="source"> // load input into __shared__ memory<br></td></tr
><tr
id=sl_svn285_21
><td class="source"> unsigned int sum = input[i];<br></td></tr
><tr
id=sl_svn285_22
><td class="source"> sdata[threadIdx.x] = sum;<br></td></tr
><tr
id=sl_svn285_23
><td class="source"> __syncthreads();<br></td></tr
><tr
id=sl_svn285_24
><td class="source"> for(int offset = 1; offset < blockDim.x; offset <<= 1)<br></td></tr
><tr
id=sl_svn285_25
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_26
><td class="source"> if(threadIdx.x >= offset)<br></td></tr
><tr
id=sl_svn285_27
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_28
><td class="source"> sum += sdata[threadIdx.x - offset];<br></td></tr
><tr
id=sl_svn285_29
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_30
><td class="source"><br></td></tr
><tr
id=sl_svn285_31
><td class="source"> // wait until every thread has updated its partial sum<br></td></tr
><tr
id=sl_svn285_32
><td class="source"> __syncthreads();<br></td></tr
><tr
id=sl_svn285_33
><td class="source"><br></td></tr
><tr
id=sl_svn285_34
><td class="source"> // write my partial sum<br></td></tr
><tr
id=sl_svn285_35
><td class="source"> sdata[threadIdx.x] = sum;<br></td></tr
><tr
id=sl_svn285_36
><td class="source"><br></td></tr
><tr
id=sl_svn285_37
><td class="source"> // wait until every thread has written its partial sum<br></td></tr
><tr
id=sl_svn285_38
><td class="source"> __syncthreads();<br></td></tr
><tr
id=sl_svn285_39
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_40
><td class="source"><br></td></tr
><tr
id=sl_svn285_41
><td class="source"> // we're done! each thread writes out its result<br></td></tr
><tr
id=sl_svn285_42
><td class="source"> result[i] = sdata[threadIdx.x];<br></td></tr
><tr
id=sl_svn285_43
><td class="source">}<br></td></tr
><tr
id=sl_svn285_44
><td class="source"> <br></td></tr
><tr
id=sl_svn285_45
><td class="source"><br></td></tr
><tr
id=sl_svn285_46
><td class="source">int main(void)<br></td></tr
><tr
id=sl_svn285_47
><td class="source">{<br></td></tr
><tr
id=sl_svn285_48
><td class="source"> // use small input sizes for illustrative purposes<br></td></tr
><tr
id=sl_svn285_49
><td class="source"> const int num_blocks = 4;<br></td></tr
><tr
id=sl_svn285_50
><td class="source"> const int block_size = 16;<br></td></tr
><tr
id=sl_svn285_51
><td class="source"> const int num_elements = num_blocks * block_size;<br></td></tr
><tr
id=sl_svn285_52
><td class="source"><br></td></tr
><tr
id=sl_svn285_53
><td class="source"> // generate random input in [0,5] on the host<br></td></tr
><tr
id=sl_svn285_54
><td class="source"> std::vector<unsigned int> h_input(num_elements);<br></td></tr
><tr
id=sl_svn285_55
><td class="source"> for(unsigned int i = 0; i < num_elements; ++i)<br></td></tr
><tr
id=sl_svn285_56
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_57
><td class="source"> h_input[i] = rand() % 6;<br></td></tr
><tr
id=sl_svn285_58
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_59
><td class="source"><br></td></tr
><tr
id=sl_svn285_60
><td class="source"> // copy input to device memory<br></td></tr
><tr
id=sl_svn285_61
><td class="source"> unsigned int *d_input = 0;<br></td></tr
><tr
id=sl_svn285_62
><td class="source"> hipMalloc((void**)&d_input, sizeof(unsigned int) * num_elements);<br></td></tr
><tr
id=sl_svn285_63
><td class="source"> hipMemcpy(d_input, &h_input[0], sizeof(unsigned int) * num_elements, hipMemcpyHostToDevice);<br></td></tr
><tr
id=sl_svn285_64
><td class="source"><br></td></tr
><tr
id=sl_svn285_65
><td class="source"> // allocate space for the result<br></td></tr
><tr
id=sl_svn285_66
><td class="source"> unsigned int *d_result = 0;<br></td></tr
><tr
id=sl_svn285_67
><td class="source"> hipMalloc((void**)&d_result, sizeof(unsigned int) * num_elements);<br></td></tr
><tr
id=sl_svn285_68
><td class="source"><br></td></tr
><tr
id=sl_svn285_69
><td class="source"> inclusive_scan<<<num_blocks, block_size, block_size * sizeof(unsigned int)>>>(d_input, d_result);<br></td></tr
><tr
id=sl_svn285_70
><td class="source"><br></td></tr
><tr
id=sl_svn285_71
><td class="source"> // copy result to host memory<br></td></tr
><tr
id=sl_svn285_72
><td class="source"> std::vector<unsigned int> h_result(num_elements);<br></td></tr
><tr
id=sl_svn285_73
><td class="source"> hipMemcpy(&h_result[0], d_result, sizeof(unsigned int) * num_elements, hipMemcpyDeviceToHost);<br></td></tr
><tr
id=sl_svn285_74
><td class="source"><br></td></tr
><tr
id=sl_svn285_75
><td class="source"> // print out the results<br></td></tr
><tr
id=sl_svn285_76
><td class="source"> for(int b = 0; b < num_blocks; ++b)<br></td></tr
><tr
id=sl_svn285_77
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_78
><td class="source"> std::cout << "Block " << b << std::endl << std::endl;<br></td></tr
><tr
id=sl_svn285_79
><td class="source"><br></td></tr
><tr
id=sl_svn285_80
><td class="source"> std::cout << "Input: " << std::endl;<br></td></tr
><tr
id=sl_svn285_81
><td class="source"> for(int i = 0; i < block_size; ++i)<br></td></tr
><tr
id=sl_svn285_82
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_83
><td class="source"> printf("%2d ", h_input[b * block_size + i]);<br></td></tr
><tr
id=sl_svn285_84
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_85
><td class="source"> std::cout << std::endl;<br></td></tr
><tr
id=sl_svn285_86
><td class="source"><br></td></tr
><tr
id=sl_svn285_87
><td class="source"> std::cout << "Result: " << std::endl;<br></td></tr
><tr
id=sl_svn285_88
><td class="source"> for(int i = 0; i < block_size; ++i)<br></td></tr
><tr
id=sl_svn285_89
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_90
><td class="source"> printf("%2d ", h_result[b * block_size + i]);<br></td></tr
><tr
id=sl_svn285_91
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_92
><td class="source"> std::cout << std::endl << std::endl << std::endl;<br></td></tr
><tr
id=sl_svn285_93
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_94
><td class="source"><br></td></tr
><tr
id=sl_svn285_95
><td class="source"> return 0;<br></td></tr
><tr
id=sl_svn285_96
><td class="source">}<br></td></tr
><tr
id=sl_svn285_97
><td class="source"><br></td></tr
></table></pre>
<pre><table width="100%"><tr class="cursor_stop cursor_hidden"><td></td></tr></table></pre>
</td>
</tr></table>
<script type="text/javascript">
var lineNumUnderMouse = -1;
function gutterOver(num) {
gutterOut();
var newTR = document.getElementById('gr_svn285_' + num);
if (newTR) {
newTR.className = 'undermouse';
}
lineNumUnderMouse = num;
}
function gutterOut() {
if (lineNumUnderMouse != -1) {
var oldTR = document.getElementById(
'gr_svn285_' + lineNumUnderMouse);
if (oldTR) {
oldTR.className = '';
}
lineNumUnderMouse = -1;
}
}
var numsGenState = {table_base_id: 'nums_table_'};
var srcGenState = {table_base_id: 'src_table_'};
var alignerRunning = false;
var startOver = false;
function setLineNumberHeights() {
if (alignerRunning) {
startOver = true;
return;
}
numsGenState.chunk_id = 0;
numsGenState.table = document.getElementById('nums_table_0');
numsGenState.row_num = 0;
if (!numsGenState.table) {
return; // Silently exit if no file is present.
}
srcGenState.chunk_id = 0;
srcGenState.table = document.getElementById('src_table_0');
srcGenState.row_num = 0;
alignerRunning = true;
continueToSetLineNumberHeights();
}
function rowGenerator(genState) {
if (genState.row_num < genState.table.rows.length) {
var currentRow = genState.table.rows[genState.row_num];
genState.row_num++;
return currentRow;
}
var newTable = document.getElementById(
genState.table_base_id + (genState.chunk_id + 1));
if (newTable) {
genState.chunk_id++;
genState.row_num = 0;
genState.table = newTable;
return genState.table.rows[0];
}
return null;
}
var MAX_ROWS_PER_PASS = 1000;
function continueToSetLineNumberHeights() {
var rowsInThisPass = 0;
var numRow = 1;
var srcRow = 1;
while (numRow && srcRow && rowsInThisPass < MAX_ROWS_PER_PASS) {
numRow = rowGenerator(numsGenState);
srcRow = rowGenerator(srcGenState);
rowsInThisPass++;
if (numRow && srcRow) {
if (numRow.offsetHeight != srcRow.offsetHeight) {
numRow.firstChild.style.height = srcRow.offsetHeight + 'px';
}
}
}
if (rowsInThisPass >= MAX_ROWS_PER_PASS) {
setTimeout(continueToSetLineNumberHeights, 10);
} else {
alignerRunning = false;
if (startOver) {
startOver = false;
setTimeout(setLineNumberHeights, 500);
}
}
}
// Schedules the initial gutter-alignment pass and re-aligns on window
// resize. Setting startOver forces a second complete pass, because this
// code can race with the prettify syntax highlighter.
function initLineNumberHeights() {
  window.onresize = setLineNumberHeights;
  startOver = true;
  setTimeout(setLineNumberHeights, 250);
}
initLineNumberHeights();
</script>
<div id="log">
<div style="text-align:right">
<a class="ifCollapse" href="#" onclick="_toggleMeta(this); return false">Show details</a>
<a class="ifExpand" href="#" onclick="_toggleMeta(this); return false">Hide details</a>
</div>
<div class="ifExpand">
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="changelog">
<p>Change log</p>
<div>
<a href="/p/stanford-cs193g-sp2010/source/detail?spec=svn285&r=223">r223</a>
by jaredhoberock
on Apr 14, 2010
<a href="/p/stanford-cs193g-sp2010/source/diff?spec=svn285&r=223&format=side&path=/trunk/tutorials/block_scan.cu&old_path=/trunk/tutorials/block_scan.cu&old=">Diff</a>
</div>
<pre>Add block scan example code.
</pre>
</div>
<script type="text/javascript">
// URLs for this revision's detail page and its publish-review anchor.
var detail_url = '/p/stanford-cs193g-sp2010/source/detail?r=223&spec=svn285';
var publish_url = '/p/stanford-cs193g-sp2010/source/detail?r=223&spec=svn285#publish';
// Parallel arrays: each path changed in this revision and its browse URL.
var changed_paths = [];
var changed_urls = [];
changed_paths.push('/trunk/tutorials/block_scan.cu');
changed_urls.push('/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/block_scan.cu?r\x3d223\x26spec\x3dsvn285');
// Path currently being viewed; used to find our index in changed_paths.
var selected_path = '/trunk/tutorials/block_scan.cu';
// Index of selected_path within changed_paths. Implicitly returns
// undefined when the path is not listed; callers rely on undefined
// comparing false against numeric bounds.
function getCurrentPageIndex() {
  for (var i = 0, n = changed_paths.length; i < n; i++) {
    if (changed_paths[i] == selected_path) {
      return i;
    }
  }
}
// Browse URL of the file after the current one in this revision, or null
// when already at the last file (or the current file is not listed).
function getNextPage() {
  var index = getCurrentPageIndex();
  return (index < changed_paths.length - 1) ? changed_urls[index + 1] : null;
}
// Browse URL of the file before the current one in this revision, or
// null when already at the first file (or the current file is not listed).
function getPreviousPage() {
  var index = getCurrentPageIndex();
  return (index > 0) ? changed_urls[index - 1] : null;
}
// Navigates to the next changed file, falling back to the revision
// detail page when there is no next file.
function gotoNextPage() {
  window.location = getNextPage() || detail_url;
}
// Navigates to the previous changed file, falling back to the revision
// detail page when there is no previous file.
function gotoPreviousPage() {
  window.location = getPreviousPage() || detail_url;
}
// Navigates to the revision detail page ('u' keyboard shortcut).
function gotoDetailPage() {
window.location = detail_url;
}
// Navigates to the publish-review anchor on the detail page.
function gotoPublishPage() {
window.location = publish_url;
}
</script>
<style type="text/css">
#review_nav {
border-top: 3px solid white;
padding-top: 6px;
margin-top: 1em;
}
#review_nav td {
vertical-align: middle;
}
#review_nav select {
margin: .5em 0;
}
</style>
<div id="review_nav">
<table><tr><td>Go to: </td><td>
<select name="files_in_rev" onchange="window.location=this.value">
<option value="/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/block_scan.cu?r=223&spec=svn285"
selected="selected"
>/trunk/tutorials/block_scan.cu</option>
</select>
</td></tr></table>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="older_bubble">
<p>Older revisions</p>
<a href="/p/stanford-cs193g-sp2010/source/list?path=/trunk/tutorials/block_scan.cu&start=223">All revisions of this file</a>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="fileinfo_bubble">
<p>File info</p>
<div>Size: 2767 bytes,
97 lines</div>
<div><a href="//stanford-cs193g-sp2010.googlecode.com/svn/trunk/tutorials/block_scan.cu">View raw file</a></div>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
</div>
</div>
</div>
</div>
</div>
<script src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/source_file_scripts.js"></script>
<script type="text/javascript" src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/kibbles.js"></script>
<script type="text/javascript">
// Index of the most recently selected skipper stop, restored after the
// stop list is rebuilt.
var lastStop = null;
// Intended to flip true once revsOnLoadHandler finishes setup.
var initialized = false;
// Skipper stop listener: hides the cursor marker on the previous stop,
// shows it on the next one, and remembers the new stop's index.
function updateCursor(next, prev) {
  var prevEl = prev && prev.element;
  if (prevEl) {
    prevEl.className = 'cursor_stop cursor_hidden';
  }
  if (next && next.element) {
    next.element.className = 'cursor_stop cursor';
    lastStop = next.index;
  }
}
// Fired when a published-comment plate is revealed: repaints the cursor
// row for that cell and, once setup has completed, rebuilds the stops.
function pubRevealed(data) {
  updateCursorForCell(data.cellId, 'cursor_stop cursor_hidden');
  if (!initialized) {
    return;
  }
  reloadCursors();
}
// Fired when a draft-comment plate is revealed: repaints the cursor row
// for that cell and, once setup has completed, rebuilds the stops.
function draftRevealed(data) {
  updateCursorForCell(data.cellId, 'cursor_stop cursor_hidden');
  if (!initialized) {
    return;
  }
  reloadCursors();
}
// Fired when a draft comment is discarded: removes the cursor styling
// from that cell's row and, once setup has completed, rebuilds the stops.
function draftDestroyed(data) {
  updateCursorForCell(data.cellId, 'nocursor');
  if (!initialized) {
    return;
  }
  reloadCursors();
}
// Rebuilds skipper's list of cursor stops from the current DOM and
// restores the previously selected stop, if one was recorded.
function reloadCursors() {
  kibbles.skipper.reset();
  loadCursors();
  if (lastStop == null) {
    return;
  }
  kibbles.skipper.setCurrentStop(lastStop);
}
// Possibly the simplest way to reflect newly added comments is to update
// the class of the corresponding cursor row and then refresh the whole
// list of rows. The cursor row sits one element-row above the cell's row.
function updateCursorForCell(cellId, className) {
  var cellRow = document.getElementById(cellId).parentNode;
  var cursorRow = getPreviousElement(cellRow);
  cursorRow.className = className;
}
// Returns the element sibling immediately preceding e, skipping a single
// intervening text node; returns null when no such element exists.
// Fix: the original dereferenced e.previousSibling without a null check,
// so it threw for a first child (no previous sibling at all).
function getPreviousElement(e) {
  var element = e.previousSibling;
  // Skip one text node (nodeType 3), as in the original.
  if (element && element.nodeType == 3) {
    element = element.previousSibling;
  }
  if (element && element.tagName) {
    return element;
  }
  return null;
}
// Registers every element carrying the 'cursor_stop' class with skipper,
// hiding each one's cursor marker initially.
function loadCursors() {
  var stops = CR_getElements('*', 'cursor_stop');
  for (var i = 0, n = stops.length; i < n; i++) {
    var stop = stops[i];
    stop.className = 'cursor_stop cursor_hidden';
    kibbles.skipper.append(stop);
  }
}
// Toggles display of inline review comments, then rebuilds the cursor
// stops since the set of visible rows has changed.
function toggleComments() {
CR_toggleCommentDisplay();
reloadCursors();
}
// Configures keyboard navigation on page load: skipper stops for comment
// navigation (n/p), file navigation within the revision (j/k), and
// shortcuts for the detail (u) and publish-review (r) pages.
function keysOnLoadHandler() {
// Route skipper stop changes through updateCursor before they take effect.
kibbles.skipper.addStopListener(
kibbles.skipper.LISTENER_TYPE.PRE, updateCursor);
// padding_top/padding_bottom tune where skipper scrolls the current stop
// within the client area (options may be static values or callbacks).
kibbles.skipper.setOption('padding_top', 50);
kibbles.skipper.setOption('padding_bottom', 100);
// Forward/reverse stop navigation keys.
kibbles.skipper.addFwdKey("n");
kibbles.skipper.addRevKey("p");
kibbles.keys.addKeyPressListener(
'u', function() { window.location = detail_url; });
kibbles.keys.addKeyPressListener(
'r', function() { window.location = detail_url + '#publish'; });
kibbles.keys.addKeyPressListener('j', gotoNextPage);
kibbles.keys.addKeyPressListener('k', gotoPreviousPage);
}
</script>
<script src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/code_review_scripts.js"></script>
<script type="text/javascript">
// Expands the publish-review instructions box, if it is on the page.
function showPublishInstructions() {
  var instructions = document.getElementById('review_instr');
  if (instructions) {
    instructions.className = 'opened';
  }
}
// Handle to the code-review controller, created on page load.
var codereviews;
// Wires the commenting UI to this source view: registers the revision's
// paths with the review controller and attaches listeners for
// comment-plate reveal/discard activity.
function revsOnLoadHandler() {
  // register our source container with the commenting code
  var paths = {'svn285': '/trunk/tutorials/block_scan.cu'};
  codereviews = CR_controller.setup(
      {"domainName":null,"assetHostPath":"https://ssl.gstatic.com/codesite/ph","profileUrl":"/u/116699586124044253698/","token":"MBDwIWyWnVeH7W7374m1cEh0iI0:1371808304867","relativeBaseUrl":"","projectName":"stanford-cs193g-sp2010","loggedInUserEmail":"gaurav.sachin007@gmail.com","assetVersionPath":"https://ssl.gstatic.com/codesite/ph/18376132045800511552","projectHomeUrl":"/p/stanford-cs193g-sp2010"}, '', 'svn285', paths,
      CR_BrowseIntegrationFactory);
  codereviews.registerActivityListener(CR_ActivityType.REVEAL_DRAFT_PLATE, showPublishInstructions);
  codereviews.registerActivityListener(CR_ActivityType.REVEAL_PUB_PLATE, pubRevealed);
  codereviews.registerActivityListener(CR_ActivityType.REVEAL_DRAFT_PLATE, draftRevealed);
  codereviews.registerActivityListener(CR_ActivityType.DISCARD_DRAFT_COMMENT, draftDestroyed);
  // Fix: assign the outer 'initialized' flag rather than declaring a
  // local with 'var', which shadowed the global and left it permanently
  // false — so pubRevealed/draftRevealed never refreshed the cursors.
  initialized = true;
  reloadCursors();
}
window.onload = function() {keysOnLoadHandler(); revsOnLoadHandler();};
</script>
<script type="text/javascript" src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/dit_scripts.js"></script>
<script type="text/javascript" src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/ph_core.js"></script>
</div>
<div id="footer" dir="ltr">
<div class="text">
<a href="/projecthosting/terms.html">Terms</a> -
<a href="http://www.google.com/privacy.html">Privacy</a> -
<a href="/p/support/">Project Hosting Help</a>
</div>
</div>
<div class="hostedBy" style="margin-top: -20px;">
<span style="vertical-align: top;">Powered by <a href="http://code.google.com/projecthosting/">Google Project Hosting</a></span>
</div>
</body>
</html>
| c6218a687509444f29149cbc678ff1e403c56c6c.cu |
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" >
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" >
<meta name="ROBOTS" content="NOARCHIVE">
<link rel="icon" type="image/vnd.microsoft.icon" href="https://ssl.gstatic.com/codesite/ph/images/phosting.ico">
<script type="text/javascript">
var codesite_token = "MBDwIWyWnVeH7W7374m1cEh0iI0:1371808304867";
var CS_env = {"domainName":null,"assetHostPath":"https://ssl.gstatic.com/codesite/ph","profileUrl":"/u/116699586124044253698/","token":"MBDwIWyWnVeH7W7374m1cEh0iI0:1371808304867","relativeBaseUrl":"","projectName":"stanford-cs193g-sp2010","loggedInUserEmail":"gaurav.sachin007@gmail.com","assetVersionPath":"https://ssl.gstatic.com/codesite/ph/18376132045800511552","projectHomeUrl":"/p/stanford-cs193g-sp2010"};
var _gaq = _gaq || [];
_gaq.push(
['siteTracker._setAccount', 'UA-18071-1'],
['siteTracker._trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(ga);
})();
</script>
<title>block_scan.cu -
stanford-cs193g-sp2010 -
Programming Massively Parallel Processors with CUDA - Google Project Hosting
</title>
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/core.css">
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/ph_detail.css" >
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/d_sb.css" >
<!--[if IE]>
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/18376132045800511552/css/d_ie.css" >
<![endif]-->
<style type="text/css">
.menuIcon.off { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -42px }
.menuIcon.on { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -28px }
.menuIcon.down { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 0; }
tr.inline_comment {
background: #fff;
vertical-align: top;
}
div.draft, div.published {
padding: .3em;
border: 1px solid #999;
margin-bottom: .1em;
font-family: arial, sans-serif;
max-width: 60em;
}
div.draft {
background: #ffa;
}
div.published {
background: #e5ecf9;
}
div.published .body, div.draft .body {
padding: .5em .1em .1em .1em;
max-width: 60em;
white-space: pre-wrap;
white-space: -moz-pre-wrap;
white-space: -pre-wrap;
white-space: -o-pre-wrap;
word-wrap: break-word;
font-size: 1em;
}
div.draft .actions {
margin-left: 1em;
font-size: 90%;
}
div.draft form {
padding: .5em .5em .5em 0;
}
div.draft textarea, div.published textarea {
width: 95%;
height: 10em;
font-family: arial, sans-serif;
margin-bottom: .5em;
}
.nocursor, .nocursor td, .cursor_hidden, .cursor_hidden td {
background-color: white;
height: 2px;
}
.cursor, .cursor td {
background-color: darkblue;
height: 2px;
display: '';
}
.list {
border: 1px solid white;
border-bottom: 0;
}
</style>
</head>
<body class="t4">
<script type="text/javascript">
window.___gcfg = {lang: 'en'};
(function()
{var po = document.createElement("script");
po.type = "text/javascript"; po.async = true;po.src = "https://apis.google.com/js/plusone.js";
var s = document.getElementsByTagName("script")[0];
s.parentNode.insertBefore(po, s);
})();
</script>
<div class="headbg">
<div id="gaia">
<span>
<a href="#" id="multilogin-dropdown" onclick="return false;"
><u><b>gaurav.sachin007@gmail.com</b></u> <small>▼</small></a>
| <a href="/u/116699586124044253698/" id="projects-dropdown" onclick="return false;"
><u>My favorites</u> <small>▼</small></a>
| <a href="/u/116699586124044253698/" onclick="_CS_click('/gb/ph/profile');"
title="Profile, Updates, and Settings"
><u>Profile</u></a>
| <a href="https://www.google.com/accounts/Logout?continue=https%3A%2F%2Fcode.google.com%2Fp%2Fstanford-cs193g-sp2010%2Fsource%2Fbrowse%2Ftrunk%2Ftutorials%2Fblock_scan.cu"
onclick="_CS_click('/gb/ph/signout');"
><u>Sign out</u></a>
</span>
</div>
<div class="gbh" style="left: 0pt;"></div>
<div class="gbh" style="right: 0pt;"></div>
<div style="height: 1px"></div>
<!--[if lte IE 7]>
<div style="text-align:center;">
Your version of Internet Explorer is not supported. Try a browser that
contributes to open source, such as <a href="http://www.firefox.com">Firefox</a>,
<a href="http://www.google.com/chrome">Google Chrome</a>, or
<a href="http://code.google.com/chrome/chromeframe/">Google Chrome Frame</a>.
</div>
<![endif]-->
<table style="padding:0px; margin: 0px 0px 10px 0px; width:100%" cellpadding="0" cellspacing="0"
itemscope itemtype="http://schema.org/CreativeWork">
<tr style="height: 58px;">
<td id="plogo">
<link itemprop="url" href="/p/stanford-cs193g-sp2010">
<a href="/p/stanford-cs193g-sp2010/">
<img src="https://ssl.gstatic.com/codesite/ph/images/defaultlogo.png" alt="Logo" itemprop="image">
</a>
</td>
<td style="padding-left: 0.5em">
<div id="pname">
<a href="/p/stanford-cs193g-sp2010/"><span itemprop="name">stanford-cs193g-sp2010</span></a>
</div>
<div id="psum">
<a id="project_summary_link"
href="/p/stanford-cs193g-sp2010/"><span itemprop="description">Programming Massively Parallel Processors with CUDA</span></a>
</div>
</td>
<td style="white-space:nowrap;text-align:right; vertical-align:bottom;">
<form action="/hosting/search">
<input size="30" name="q" value="" type="text">
<input type="submit" name="projectsearch" value="Search projects" >
</form>
</tr>
</table>
</div>
<div id="mt" class="gtb">
<a href="/p/stanford-cs193g-sp2010/" class="tab ">Project Home</a>
<a href="/p/stanford-cs193g-sp2010/w/list" class="tab ">Wiki</a>
<a href="/p/stanford-cs193g-sp2010/issues/list"
class="tab ">Issues</a>
<a href="/p/stanford-cs193g-sp2010/source/checkout"
class="tab active">Source</a>
<div class=gtbc></div>
</div>
<table cellspacing="0" cellpadding="0" width="100%" align="center" border="0" class="st">
<tr>
<td class="subt">
<div class="st2">
<div class="isf">
<span class="inst1"><a href="/p/stanford-cs193g-sp2010/source/checkout">Checkout</a></span>
<span class="inst2"><a href="/p/stanford-cs193g-sp2010/source/browse/">Browse</a></span>
<span class="inst3"><a href="/p/stanford-cs193g-sp2010/source/list">Changes</a></span>
</form>
<script type="text/javascript">
// Appends the user's query to the codesearch form action before submit.
// Fix: the query is now URI-encoded; the original interpolated it raw
// after a literal '%20', so queries containing '&', '#', '%', or spaces
// produced a malformed search URL.
function codesearchQuery(form) {
  var query = document.getElementById('q').value;
  if (query) { form.action += '%20' + encodeURIComponent(query); }
}
</script>
</div>
</div>
</td>
<td align="right" valign="top" class="bevel-right"></td>
</tr>
</table>
<script type="text/javascript">
var cancelBubble = false;
function _go(url) { document.location = url; }
</script>
<div id="maincol"
>
<div class="expand">
<div id="colcontrol">
<style type="text/css">
#file_flipper { white-space: nowrap; padding-right: 2em; }
#file_flipper.hidden { display: none; }
#file_flipper .pagelink { color: #0000CC; text-decoration: underline; }
#file_flipper #visiblefiles { padding-left: 0.5em; padding-right: 0.5em; }
</style>
<table id="nav_and_rev" class="list"
cellpadding="0" cellspacing="0" width="100%">
<tr>
<td nowrap="nowrap" class="src_crumbs src_nav" width="33%">
<strong class="src_nav">Source path: </strong>
<span id="crumb_root">
<a href="/p/stanford-cs193g-sp2010/source/browse/">svn</a>/ </span>
<span id="crumb_links" class="ifClosed"><a href="/p/stanford-cs193g-sp2010/source/browse/trunk/">trunk</a><span class="sp">/ </span><a href="/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/">tutorials</a><span class="sp">/ </span>block_scan.cu</span>
</td>
<td nowrap="nowrap" width="33%" align="center">
<a href="/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/block_scan.cu?edit=1"
><img src="https://ssl.gstatic.com/codesite/ph/images/pencil-y14.png"
class="edit_icon">Edit file</a>
</td>
<td nowrap="nowrap" width="33%" align="right">
<table cellpadding="0" cellspacing="0" style="font-size: 100%"><tr>
<td class="flipper"><b>r285</b></td>
</tr></table>
</td>
</tr>
</table>
<div class="fc">
<style type="text/css">
.undermouse span {
background-image: url(https://ssl.gstatic.com/codesite/ph/images/comments.gif); }
</style>
<table class="opened" id="review_comment_area"
><tr>
<td id="nums">
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
<pre><table width="100%" id="nums_table_0"><tr id="gr_svn285_1"
><td id="1"><a href="#1">1</a></td></tr
><tr id="gr_svn285_2"
><td id="2"><a href="#2">2</a></td></tr
><tr id="gr_svn285_3"
><td id="3"><a href="#3">3</a></td></tr
><tr id="gr_svn285_4"
><td id="4"><a href="#4">4</a></td></tr
><tr id="gr_svn285_5"
><td id="5"><a href="#5">5</a></td></tr
><tr id="gr_svn285_6"
><td id="6"><a href="#6">6</a></td></tr
><tr id="gr_svn285_7"
><td id="7"><a href="#7">7</a></td></tr
><tr id="gr_svn285_8"
><td id="8"><a href="#8">8</a></td></tr
><tr id="gr_svn285_9"
><td id="9"><a href="#9">9</a></td></tr
><tr id="gr_svn285_10"
><td id="10"><a href="#10">10</a></td></tr
><tr id="gr_svn285_11"
><td id="11"><a href="#11">11</a></td></tr
><tr id="gr_svn285_12"
><td id="12"><a href="#12">12</a></td></tr
><tr id="gr_svn285_13"
><td id="13"><a href="#13">13</a></td></tr
><tr id="gr_svn285_14"
><td id="14"><a href="#14">14</a></td></tr
><tr id="gr_svn285_15"
><td id="15"><a href="#15">15</a></td></tr
><tr id="gr_svn285_16"
><td id="16"><a href="#16">16</a></td></tr
><tr id="gr_svn285_17"
><td id="17"><a href="#17">17</a></td></tr
><tr id="gr_svn285_18"
><td id="18"><a href="#18">18</a></td></tr
><tr id="gr_svn285_19"
><td id="19"><a href="#19">19</a></td></tr
><tr id="gr_svn285_20"
><td id="20"><a href="#20">20</a></td></tr
><tr id="gr_svn285_21"
><td id="21"><a href="#21">21</a></td></tr
><tr id="gr_svn285_22"
><td id="22"><a href="#22">22</a></td></tr
><tr id="gr_svn285_23"
><td id="23"><a href="#23">23</a></td></tr
><tr id="gr_svn285_24"
><td id="24"><a href="#24">24</a></td></tr
><tr id="gr_svn285_25"
><td id="25"><a href="#25">25</a></td></tr
><tr id="gr_svn285_26"
><td id="26"><a href="#26">26</a></td></tr
><tr id="gr_svn285_27"
><td id="27"><a href="#27">27</a></td></tr
><tr id="gr_svn285_28"
><td id="28"><a href="#28">28</a></td></tr
><tr id="gr_svn285_29"
><td id="29"><a href="#29">29</a></td></tr
><tr id="gr_svn285_30"
><td id="30"><a href="#30">30</a></td></tr
><tr id="gr_svn285_31"
><td id="31"><a href="#31">31</a></td></tr
><tr id="gr_svn285_32"
><td id="32"><a href="#32">32</a></td></tr
><tr id="gr_svn285_33"
><td id="33"><a href="#33">33</a></td></tr
><tr id="gr_svn285_34"
><td id="34"><a href="#34">34</a></td></tr
><tr id="gr_svn285_35"
><td id="35"><a href="#35">35</a></td></tr
><tr id="gr_svn285_36"
><td id="36"><a href="#36">36</a></td></tr
><tr id="gr_svn285_37"
><td id="37"><a href="#37">37</a></td></tr
><tr id="gr_svn285_38"
><td id="38"><a href="#38">38</a></td></tr
><tr id="gr_svn285_39"
><td id="39"><a href="#39">39</a></td></tr
><tr id="gr_svn285_40"
><td id="40"><a href="#40">40</a></td></tr
><tr id="gr_svn285_41"
><td id="41"><a href="#41">41</a></td></tr
><tr id="gr_svn285_42"
><td id="42"><a href="#42">42</a></td></tr
><tr id="gr_svn285_43"
><td id="43"><a href="#43">43</a></td></tr
><tr id="gr_svn285_44"
><td id="44"><a href="#44">44</a></td></tr
><tr id="gr_svn285_45"
><td id="45"><a href="#45">45</a></td></tr
><tr id="gr_svn285_46"
><td id="46"><a href="#46">46</a></td></tr
><tr id="gr_svn285_47"
><td id="47"><a href="#47">47</a></td></tr
><tr id="gr_svn285_48"
><td id="48"><a href="#48">48</a></td></tr
><tr id="gr_svn285_49"
><td id="49"><a href="#49">49</a></td></tr
><tr id="gr_svn285_50"
><td id="50"><a href="#50">50</a></td></tr
><tr id="gr_svn285_51"
><td id="51"><a href="#51">51</a></td></tr
><tr id="gr_svn285_52"
><td id="52"><a href="#52">52</a></td></tr
><tr id="gr_svn285_53"
><td id="53"><a href="#53">53</a></td></tr
><tr id="gr_svn285_54"
><td id="54"><a href="#54">54</a></td></tr
><tr id="gr_svn285_55"
><td id="55"><a href="#55">55</a></td></tr
><tr id="gr_svn285_56"
><td id="56"><a href="#56">56</a></td></tr
><tr id="gr_svn285_57"
><td id="57"><a href="#57">57</a></td></tr
><tr id="gr_svn285_58"
><td id="58"><a href="#58">58</a></td></tr
><tr id="gr_svn285_59"
><td id="59"><a href="#59">59</a></td></tr
><tr id="gr_svn285_60"
><td id="60"><a href="#60">60</a></td></tr
><tr id="gr_svn285_61"
><td id="61"><a href="#61">61</a></td></tr
><tr id="gr_svn285_62"
><td id="62"><a href="#62">62</a></td></tr
><tr id="gr_svn285_63"
><td id="63"><a href="#63">63</a></td></tr
><tr id="gr_svn285_64"
><td id="64"><a href="#64">64</a></td></tr
><tr id="gr_svn285_65"
><td id="65"><a href="#65">65</a></td></tr
><tr id="gr_svn285_66"
><td id="66"><a href="#66">66</a></td></tr
><tr id="gr_svn285_67"
><td id="67"><a href="#67">67</a></td></tr
><tr id="gr_svn285_68"
><td id="68"><a href="#68">68</a></td></tr
><tr id="gr_svn285_69"
><td id="69"><a href="#69">69</a></td></tr
><tr id="gr_svn285_70"
><td id="70"><a href="#70">70</a></td></tr
><tr id="gr_svn285_71"
><td id="71"><a href="#71">71</a></td></tr
><tr id="gr_svn285_72"
><td id="72"><a href="#72">72</a></td></tr
><tr id="gr_svn285_73"
><td id="73"><a href="#73">73</a></td></tr
><tr id="gr_svn285_74"
><td id="74"><a href="#74">74</a></td></tr
><tr id="gr_svn285_75"
><td id="75"><a href="#75">75</a></td></tr
><tr id="gr_svn285_76"
><td id="76"><a href="#76">76</a></td></tr
><tr id="gr_svn285_77"
><td id="77"><a href="#77">77</a></td></tr
><tr id="gr_svn285_78"
><td id="78"><a href="#78">78</a></td></tr
><tr id="gr_svn285_79"
><td id="79"><a href="#79">79</a></td></tr
><tr id="gr_svn285_80"
><td id="80"><a href="#80">80</a></td></tr
><tr id="gr_svn285_81"
><td id="81"><a href="#81">81</a></td></tr
><tr id="gr_svn285_82"
><td id="82"><a href="#82">82</a></td></tr
><tr id="gr_svn285_83"
><td id="83"><a href="#83">83</a></td></tr
><tr id="gr_svn285_84"
><td id="84"><a href="#84">84</a></td></tr
><tr id="gr_svn285_85"
><td id="85"><a href="#85">85</a></td></tr
><tr id="gr_svn285_86"
><td id="86"><a href="#86">86</a></td></tr
><tr id="gr_svn285_87"
><td id="87"><a href="#87">87</a></td></tr
><tr id="gr_svn285_88"
><td id="88"><a href="#88">88</a></td></tr
><tr id="gr_svn285_89"
><td id="89"><a href="#89">89</a></td></tr
><tr id="gr_svn285_90"
><td id="90"><a href="#90">90</a></td></tr
><tr id="gr_svn285_91"
><td id="91"><a href="#91">91</a></td></tr
><tr id="gr_svn285_92"
><td id="92"><a href="#92">92</a></td></tr
><tr id="gr_svn285_93"
><td id="93"><a href="#93">93</a></td></tr
><tr id="gr_svn285_94"
><td id="94"><a href="#94">94</a></td></tr
><tr id="gr_svn285_95"
><td id="95"><a href="#95">95</a></td></tr
><tr id="gr_svn285_96"
><td id="96"><a href="#96">96</a></td></tr
><tr id="gr_svn285_97"
><td id="97"><a href="#97">97</a></td></tr
></table></pre>
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
</td>
<td id="lines">
<pre><table width="100%"><tr class="cursor_stop cursor_hidden"><td></td></tr></table></pre>
<pre ><table id="src_table_0"><tr
id=sl_svn285_1
><td class="source">// This example demonstrates a block-wise inclusive<br></td></tr
><tr
id=sl_svn285_2
><td class="source">// parallel prefix sum (scan) algorithm.<br></td></tr
><tr
id=sl_svn285_3
><td class="source"><br></td></tr
><tr
id=sl_svn285_4
><td class="source">#include <stdlib.h><br></td></tr
><tr
id=sl_svn285_5
><td class="source">#include <stdio.h><br></td></tr
><tr
id=sl_svn285_6
><td class="source">#include <vector><br></td></tr
><tr
id=sl_svn285_7
><td class="source">#include <iostream><br></td></tr
><tr
id=sl_svn285_8
><td class="source"><br></td></tr
><tr
id=sl_svn285_9
><td class="source"><br></td></tr
><tr
id=sl_svn285_10
><td class="source">// This kernel computes, per-block, a block-sized scan<br></td></tr
><tr
id=sl_svn285_11
><td class="source">// of the input. It assumes that the block size evenly<br></td></tr
><tr
id=sl_svn285_12
><td class="source">// divides the input size<br></td></tr
><tr
id=sl_svn285_13
><td class="source">__global__ void inclusive_scan(const unsigned int *input,<br></td></tr
><tr
id=sl_svn285_14
><td class="source"> unsigned int *result)<br></td></tr
><tr
id=sl_svn285_15
><td class="source">{<br></td></tr
><tr
id=sl_svn285_16
><td class="source"> extern __shared__ unsigned int sdata[];<br></td></tr
><tr
id=sl_svn285_17
><td class="source"><br></td></tr
><tr
id=sl_svn285_18
><td class="source"> unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;<br></td></tr
><tr
id=sl_svn285_19
><td class="source"><br></td></tr
><tr
id=sl_svn285_20
><td class="source"> // load input into __shared__ memory<br></td></tr
><tr
id=sl_svn285_21
><td class="source"> unsigned int sum = input[i];<br></td></tr
><tr
id=sl_svn285_22
><td class="source"> sdata[threadIdx.x] = sum;<br></td></tr
><tr
id=sl_svn285_23
><td class="source"> __syncthreads();<br></td></tr
><tr
id=sl_svn285_24
><td class="source"> for(int offset = 1; offset < blockDim.x; offset <<= 1)<br></td></tr
><tr
id=sl_svn285_25
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_26
><td class="source"> if(threadIdx.x >= offset)<br></td></tr
><tr
id=sl_svn285_27
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_28
><td class="source"> sum += sdata[threadIdx.x - offset];<br></td></tr
><tr
id=sl_svn285_29
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_30
><td class="source"><br></td></tr
><tr
id=sl_svn285_31
><td class="source"> // wait until every thread has updated its partial sum<br></td></tr
><tr
id=sl_svn285_32
><td class="source"> __syncthreads();<br></td></tr
><tr
id=sl_svn285_33
><td class="source"><br></td></tr
><tr
id=sl_svn285_34
><td class="source"> // write my partial sum<br></td></tr
><tr
id=sl_svn285_35
><td class="source"> sdata[threadIdx.x] = sum;<br></td></tr
><tr
id=sl_svn285_36
><td class="source"><br></td></tr
><tr
id=sl_svn285_37
><td class="source"> // wait until every thread has written its partial sum<br></td></tr
><tr
id=sl_svn285_38
><td class="source"> __syncthreads();<br></td></tr
><tr
id=sl_svn285_39
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_40
><td class="source"><br></td></tr
><tr
id=sl_svn285_41
><td class="source"> // we're done! each thread writes out its result<br></td></tr
><tr
id=sl_svn285_42
><td class="source"> result[i] = sdata[threadIdx.x];<br></td></tr
><tr
id=sl_svn285_43
><td class="source">}<br></td></tr
><tr
id=sl_svn285_44
><td class="source"> <br></td></tr
><tr
id=sl_svn285_45
><td class="source"><br></td></tr
><tr
id=sl_svn285_46
><td class="source">int main(void)<br></td></tr
><tr
id=sl_svn285_47
><td class="source">{<br></td></tr
><tr
id=sl_svn285_48
><td class="source"> // use small input sizes for illustrative purposes<br></td></tr
><tr
id=sl_svn285_49
><td class="source"> const int num_blocks = 4;<br></td></tr
><tr
id=sl_svn285_50
><td class="source"> const int block_size = 16;<br></td></tr
><tr
id=sl_svn285_51
><td class="source"> const int num_elements = num_blocks * block_size;<br></td></tr
><tr
id=sl_svn285_52
><td class="source"><br></td></tr
><tr
id=sl_svn285_53
><td class="source"> // generate random input in [0,5] on the host<br></td></tr
><tr
id=sl_svn285_54
><td class="source"> std::vector<unsigned int> h_input(num_elements);<br></td></tr
><tr
id=sl_svn285_55
><td class="source"> for(unsigned int i = 0; i < num_elements; ++i)<br></td></tr
><tr
id=sl_svn285_56
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_57
><td class="source"> h_input[i] = rand() % 6;<br></td></tr
><tr
id=sl_svn285_58
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_59
><td class="source"><br></td></tr
><tr
id=sl_svn285_60
><td class="source"> // copy input to device memory<br></td></tr
><tr
id=sl_svn285_61
><td class="source"> unsigned int *d_input = 0;<br></td></tr
><tr
id=sl_svn285_62
><td class="source"> cudaMalloc((void**)&d_input, sizeof(unsigned int) * num_elements);<br></td></tr
><tr
id=sl_svn285_63
><td class="source"> cudaMemcpy(d_input, &h_input[0], sizeof(unsigned int) * num_elements, cudaMemcpyHostToDevice);<br></td></tr
><tr
id=sl_svn285_64
><td class="source"><br></td></tr
><tr
id=sl_svn285_65
><td class="source"> // allocate space for the result<br></td></tr
><tr
id=sl_svn285_66
><td class="source"> unsigned int *d_result = 0;<br></td></tr
><tr
id=sl_svn285_67
><td class="source"> cudaMalloc((void**)&d_result, sizeof(unsigned int) * num_elements);<br></td></tr
><tr
id=sl_svn285_68
><td class="source"><br></td></tr
><tr
id=sl_svn285_69
><td class="source"> inclusive_scan<<<num_blocks, block_size, block_size * sizeof(unsigned int)>>>(d_input, d_result);<br></td></tr
><tr
id=sl_svn285_70
><td class="source"><br></td></tr
><tr
id=sl_svn285_71
><td class="source"> // copy result to host memory<br></td></tr
><tr
id=sl_svn285_72
><td class="source"> std::vector<unsigned int> h_result(num_elements);<br></td></tr
><tr
id=sl_svn285_73
><td class="source"> cudaMemcpy(&h_result[0], d_result, sizeof(unsigned int) * num_elements, cudaMemcpyDeviceToHost);<br></td></tr
><tr
id=sl_svn285_74
><td class="source"><br></td></tr
><tr
id=sl_svn285_75
><td class="source"> // print out the results<br></td></tr
><tr
id=sl_svn285_76
><td class="source"> for(int b = 0; b < num_blocks; ++b)<br></td></tr
><tr
id=sl_svn285_77
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_78
><td class="source"> std::cout << "Block " << b << std::endl << std::endl;<br></td></tr
><tr
id=sl_svn285_79
><td class="source"><br></td></tr
><tr
id=sl_svn285_80
><td class="source"> std::cout << "Input: " << std::endl;<br></td></tr
><tr
id=sl_svn285_81
><td class="source"> for(int i = 0; i < block_size; ++i)<br></td></tr
><tr
id=sl_svn285_82
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_83
><td class="source"> printf("%2d ", h_input[b * block_size + i]);<br></td></tr
><tr
id=sl_svn285_84
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_85
><td class="source"> std::cout << std::endl;<br></td></tr
><tr
id=sl_svn285_86
><td class="source"><br></td></tr
><tr
id=sl_svn285_87
><td class="source"> std::cout << "Result: " << std::endl;<br></td></tr
><tr
id=sl_svn285_88
><td class="source"> for(int i = 0; i < block_size; ++i)<br></td></tr
><tr
id=sl_svn285_89
><td class="source"> {<br></td></tr
><tr
id=sl_svn285_90
><td class="source"> printf("%2d ", h_result[b * block_size + i]);<br></td></tr
><tr
id=sl_svn285_91
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_92
><td class="source"> std::cout << std::endl << std::endl << std::endl;<br></td></tr
><tr
id=sl_svn285_93
><td class="source"> }<br></td></tr
><tr
id=sl_svn285_94
><td class="source"><br></td></tr
><tr
id=sl_svn285_95
><td class="source"> return 0;<br></td></tr
><tr
id=sl_svn285_96
><td class="source">}<br></td></tr
><tr
id=sl_svn285_97
><td class="source"><br></td></tr
></table></pre>
<pre><table width="100%"><tr class="cursor_stop cursor_hidden"><td></td></tr></table></pre>
</td>
</tr></table>
<script type="text/javascript">
// Line number of the gutter row currently highlighted, or -1 for none.
var lineNumUnderMouse = -1;
// Highlights the gutter row for line 'num' as the mouse moves over it,
// clearing any previous highlight first.
function gutterOver(num) {
  gutterOut();
  var row = document.getElementById('gr_svn285_' + num);
  if (row) {
    row.className = 'undermouse';
  }
  lineNumUnderMouse = num;
}
function gutterOut() {
if (lineNumUnderMouse != -1) {
var oldTR = document.getElementById(
'gr_svn285_' + lineNumUnderMouse);
if (oldTR) {
oldTR.className = '';
}
lineNumUnderMouse = -1;
}
}
var numsGenState = {table_base_id: 'nums_table_'};
var srcGenState = {table_base_id: 'src_table_'};
var alignerRunning = false;
var startOver = false;
function setLineNumberHeights() {
if (alignerRunning) {
startOver = true;
return;
}
numsGenState.chunk_id = 0;
numsGenState.table = document.getElementById('nums_table_0');
numsGenState.row_num = 0;
if (!numsGenState.table) {
return; // Silently exit if no file is present.
}
srcGenState.chunk_id = 0;
srcGenState.table = document.getElementById('src_table_0');
srcGenState.row_num = 0;
alignerRunning = true;
continueToSetLineNumberHeights();
}
function rowGenerator(genState) {
if (genState.row_num < genState.table.rows.length) {
var currentRow = genState.table.rows[genState.row_num];
genState.row_num++;
return currentRow;
}
var newTable = document.getElementById(
genState.table_base_id + (genState.chunk_id + 1));
if (newTable) {
genState.chunk_id++;
genState.row_num = 0;
genState.table = newTable;
return genState.table.rows[0];
}
return null;
}
var MAX_ROWS_PER_PASS = 1000;
function continueToSetLineNumberHeights() {
var rowsInThisPass = 0;
var numRow = 1;
var srcRow = 1;
while (numRow && srcRow && rowsInThisPass < MAX_ROWS_PER_PASS) {
numRow = rowGenerator(numsGenState);
srcRow = rowGenerator(srcGenState);
rowsInThisPass++;
if (numRow && srcRow) {
if (numRow.offsetHeight != srcRow.offsetHeight) {
numRow.firstChild.style.height = srcRow.offsetHeight + 'px';
}
}
}
if (rowsInThisPass >= MAX_ROWS_PER_PASS) {
setTimeout(continueToSetLineNumberHeights, 10);
} else {
alignerRunning = false;
if (startOver) {
startOver = false;
setTimeout(setLineNumberHeights, 500);
}
}
}
function initLineNumberHeights() {
// Do 2 complete passes, because there can be races
// between this code and prettify.
startOver = true;
setTimeout(setLineNumberHeights, 250);
window.onresize = setLineNumberHeights;
}
initLineNumberHeights();
</script>
<div id="log">
<div style="text-align:right">
<a class="ifCollapse" href="#" onclick="_toggleMeta(this); return false">Show details</a>
<a class="ifExpand" href="#" onclick="_toggleMeta(this); return false">Hide details</a>
</div>
<div class="ifExpand">
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="changelog">
<p>Change log</p>
<div>
<a href="/p/stanford-cs193g-sp2010/source/detail?spec=svn285&r=223">r223</a>
by jaredhoberock
on Apr 14, 2010
<a href="/p/stanford-cs193g-sp2010/source/diff?spec=svn285&r=223&format=side&path=/trunk/tutorials/block_scan.cu&old_path=/trunk/tutorials/block_scan.cu&old=">Diff</a>
</div>
<pre>Add block scan example code.
</pre>
</div>
<script type="text/javascript">
var detail_url = '/p/stanford-cs193g-sp2010/source/detail?r=223&spec=svn285';
var publish_url = '/p/stanford-cs193g-sp2010/source/detail?r=223&spec=svn285#publish';
// describe the paths of this revision in javascript.
var changed_paths = [];
var changed_urls = [];
changed_paths.push('/trunk/tutorials/block_scan.cu');
changed_urls.push('/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/block_scan.cu?r\x3d223\x26spec\x3dsvn285');
var selected_path = '/trunk/tutorials/block_scan.cu';
function getCurrentPageIndex() {
for (var i = 0; i < changed_paths.length; i++) {
if (selected_path == changed_paths[i]) {
return i;
}
}
}
function getNextPage() {
var i = getCurrentPageIndex();
if (i < changed_paths.length - 1) {
return changed_urls[i + 1];
}
return null;
}
function getPreviousPage() {
var i = getCurrentPageIndex();
if (i > 0) {
return changed_urls[i - 1];
}
return null;
}
function gotoNextPage() {
var page = getNextPage();
if (!page) {
page = detail_url;
}
window.location = page;
}
function gotoPreviousPage() {
var page = getPreviousPage();
if (!page) {
page = detail_url;
}
window.location = page;
}
function gotoDetailPage() {
window.location = detail_url;
}
function gotoPublishPage() {
window.location = publish_url;
}
</script>
<style type="text/css">
#review_nav {
border-top: 3px solid white;
padding-top: 6px;
margin-top: 1em;
}
#review_nav td {
vertical-align: middle;
}
#review_nav select {
margin: .5em 0;
}
</style>
<div id="review_nav">
<table><tr><td>Go to: </td><td>
<select name="files_in_rev" onchange="window.location=this.value">
<option value="/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/block_scan.cu?r=223&spec=svn285"
selected="selected"
>/trunk/tutorials/block_scan.cu</option>
</select>
</td></tr></table>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="older_bubble">
<p>Older revisions</p>
<a href="/p/stanford-cs193g-sp2010/source/list?path=/trunk/tutorials/block_scan.cu&start=223">All revisions of this file</a>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="fileinfo_bubble">
<p>File info</p>
<div>Size: 2767 bytes,
97 lines</div>
<div><a href="//stanford-cs193g-sp2010.googlecode.com/svn/trunk/tutorials/block_scan.cu">View raw file</a></div>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
</div>
</div>
</div>
</div>
</div>
<script src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/source_file_scripts.js"></script>
<script type="text/javascript" src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/kibbles.js"></script>
<script type="text/javascript">
var lastStop = null;
var initialized = false;
function updateCursor(next, prev) {
if (prev && prev.element) {
prev.element.className = 'cursor_stop cursor_hidden';
}
if (next && next.element) {
next.element.className = 'cursor_stop cursor';
lastStop = next.index;
}
}
function pubRevealed(data) {
updateCursorForCell(data.cellId, 'cursor_stop cursor_hidden');
if (initialized) {
reloadCursors();
}
}
function draftRevealed(data) {
updateCursorForCell(data.cellId, 'cursor_stop cursor_hidden');
if (initialized) {
reloadCursors();
}
}
function draftDestroyed(data) {
updateCursorForCell(data.cellId, 'nocursor');
if (initialized) {
reloadCursors();
}
}
function reloadCursors() {
kibbles.skipper.reset();
loadCursors();
if (lastStop != null) {
kibbles.skipper.setCurrentStop(lastStop);
}
}
// possibly the simplest way to insert any newly added comments
// is to update the class of the corresponding cursor row,
// then refresh the entire list of rows.
function updateCursorForCell(cellId, className) {
var cell = document.getElementById(cellId);
// we have to go two rows back to find the cursor location
var row = getPreviousElement(cell.parentNode);
row.className = className;
}
// returns the previous element, ignores text nodes.
function getPreviousElement(e) {
var element = e.previousSibling;
if (element.nodeType == 3) {
element = element.previousSibling;
}
if (element && element.tagName) {
return element;
}
}
function loadCursors() {
// register our elements with skipper
var elements = CR_getElements('*', 'cursor_stop');
var len = elements.length;
for (var i = 0; i < len; i++) {
var element = elements[i];
element.className = 'cursor_stop cursor_hidden';
kibbles.skipper.append(element);
}
}
function toggleComments() {
CR_toggleCommentDisplay();
reloadCursors();
}
function keysOnLoadHandler() {
// setup skipper
kibbles.skipper.addStopListener(
kibbles.skipper.LISTENER_TYPE.PRE, updateCursor);
// Set the 'offset' option to return the middle of the client area
// an option can be a static value, or a callback
kibbles.skipper.setOption('padding_top', 50);
// Set the 'offset' option to return the middle of the client area
// an option can be a static value, or a callback
kibbles.skipper.setOption('padding_bottom', 100);
// Register our keys
kibbles.skipper.addFwdKey("n");
kibbles.skipper.addRevKey("p");
kibbles.keys.addKeyPressListener(
'u', function() { window.location = detail_url; });
kibbles.keys.addKeyPressListener(
'r', function() { window.location = detail_url + '#publish'; });
kibbles.keys.addKeyPressListener('j', gotoNextPage);
kibbles.keys.addKeyPressListener('k', gotoPreviousPage);
}
</script>
<script src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/code_review_scripts.js"></script>
<script type="text/javascript">
function showPublishInstructions() {
var element = document.getElementById('review_instr');
if (element) {
element.className = 'opened';
}
}
var codereviews;
function revsOnLoadHandler() {
// register our source container with the commenting code
var paths = {'svn285': '/trunk/tutorials/block_scan.cu'}
codereviews = CR_controller.setup(
{"domainName":null,"assetHostPath":"https://ssl.gstatic.com/codesite/ph","profileUrl":"/u/116699586124044253698/","token":"MBDwIWyWnVeH7W7374m1cEh0iI0:1371808304867","relativeBaseUrl":"","projectName":"stanford-cs193g-sp2010","loggedInUserEmail":"gaurav.sachin007@gmail.com","assetVersionPath":"https://ssl.gstatic.com/codesite/ph/18376132045800511552","projectHomeUrl":"/p/stanford-cs193g-sp2010"}, '', 'svn285', paths,
CR_BrowseIntegrationFactory);
codereviews.registerActivityListener(CR_ActivityType.REVEAL_DRAFT_PLATE, showPublishInstructions);
codereviews.registerActivityListener(CR_ActivityType.REVEAL_PUB_PLATE, pubRevealed);
codereviews.registerActivityListener(CR_ActivityType.REVEAL_DRAFT_PLATE, draftRevealed);
codereviews.registerActivityListener(CR_ActivityType.DISCARD_DRAFT_COMMENT, draftDestroyed);
var initialized = true;
reloadCursors();
}
window.onload = function() {keysOnLoadHandler(); revsOnLoadHandler();};
</script>
<script type="text/javascript" src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/dit_scripts.js"></script>
<script type="text/javascript" src="https://ssl.gstatic.com/codesite/ph/18376132045800511552/js/ph_core.js"></script>
</div>
<div id="footer" dir="ltr">
<div class="text">
<a href="/projecthosting/terms.html">Terms</a> -
<a href="http://www.google.com/privacy.html">Privacy</a> -
<a href="/p/support/">Project Hosting Help</a>
</div>
</div>
<div class="hostedBy" style="margin-top: -20px;">
<span style="vertical-align: top;">Powered by <a href="http://code.google.com/projecthosting/">Google Project Hosting</a></span>
</div>
</body>
</html>
|
e44cd7d87a12f38c3cb5c62108515594abe05abe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv.cu, normal z -> d, Thu Oct 8 23:05:35 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_d
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "dtrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ double shared_data[];
/******************************************************************************/
// Single-block out-of-place triangular solve, non-transposed op(A) = A.
// Thin __global__ wrapper: the actual solver lives in dtrsv_notrans_device
// (dtrsv_template_device.cuh).  BLOCK_SIZE/DIM_X/DIM_Y/TILE_SIZE select the
// blocking configuration and uplo/trans/diag select the variant, all at
// compile time.  `flag` tells the device code whether x already contains
// partial updates from an earlier recursive step (0: no, nonzero: yes --
// see the README comment near magmablas_dtrsv_recursive_outofplace).
// The host wrapper launches this with one thread block and
// n*sizeof(double) bytes of dynamic shared memory (backs shared_data[]).
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
          const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
          const magma_trans_t trans, const magma_diag_t diag >
__global__ void
dtrsv_notrans_kernel_outplace(
    int n,                                   // order of the triangular matrix A
    const double * __restrict__ A, int lda,  // matrix and its leading dimension
    double *b, int incb,                     // right-hand side and its stride
    double *x)                               // solution vector (output)
{
    dtrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
// Single-block out-of-place triangular solve, transposed / conjugate-
// transposed op(A).  Thin __global__ wrapper: the actual solver lives in
// dtrsv_trans_device (dtrsv_template_device.cuh).  Template parameters pick
// the blocking configuration and the uplo/trans/diag variant at compile
// time.  `flag` tells the device code whether x already contains partial
// updates from an earlier recursive step (0: no, nonzero: yes -- see the
// README comment near magmablas_dtrsv_recursive_outofplace).
// The host wrapper launches this with one thread block and
// n*sizeof(double) bytes of dynamic shared memory (backs shared_data[]).
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
          const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
          const magma_trans_t trans, const magma_diag_t diag >
__global__ void
dtrsv_trans_kernel_outplace(
    int n,                                   // order of the triangular matrix A
    const double * __restrict__ A, int lda,  // matrix and its leading dimension
    double *b, int incb,                     // right-hand side and its stride
    double *x)                               // solution vector (output)
{
    dtrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
/*
    Dispatch a single-thread-block, out-of-place triangular solve on the GPU.

    The kernels are templated on (blocking sizes, flag, uplo, trans, diag),
    all of which must be compile-time constants, so this routine maps the
    runtime (uplo, trans, diag, flag) combination onto the matching
    instantiation.  `flag` says whether x already holds partial updates from
    a previous recursive step (0: no, nonzero: yes); see the README comment
    near magmablas_dtrsv_recursive_outofplace.

    The launch uses one thread block and n*sizeof(double) bytes of dynamic
    shared memory; the recursive driver calls this with n <= NB.
*/
extern "C" void
magmablas_dtrsv_outofplace(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaDouble_const_ptr A, magma_int_t lda,
    magmaDouble_ptr b, magma_int_t incb,
    magmaDouble_ptr x,
    magma_queue_t queue,
    magma_int_t flag=0)
{
    /* Check arguments (error codes follow magmablas_dtrsv's argument order) */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower )
        info = -1;
    else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
        info = -2;
    else if ( diag != MagmaUnit && diag != MagmaNonUnit )
        info = -3;
    else if ( n < 0 )
        info = -5;
    else if ( lda < max(1,n) )
        info = -8;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    // quick return if possible.
    if ( n == 0 )
        return;

    dim3 threads( NUM_THREADS );
    dim3 blocks( 1, 1, 1 );
    size_t shmem = n * sizeof(double);   // backs extern __shared__ shared_data[]

    // One launch per template instantiation; the macros only abbreviate the
    // launch boilerplate, they do not change which kernel runs.
#define DTRSV_NOTRANS( FLAG, UPLO, DIAG )                                                                                                     \
    hipLaunchKernelGGL(( dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, FLAG, UPLO, MagmaNoTrans, DIAG > ), \
                       dim3(blocks), dim3(threads), shmem, queue->cuda_stream(),                                                              \
                       n, A, lda, b, incb, x )

#define DTRSV_TRANS( FLAG, UPLO, TRANS, DIAG )                                                                                                \
    hipLaunchKernelGGL(( dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, FLAG, UPLO, TRANS, DIAG > ),          \
                       dim3(blocks), dim3(threads), shmem, queue->cuda_stream(),                                                              \
                       n, A, lda, b, incb, x )

    const bool upper   = (uplo == MagmaUpper);
    const bool nonunit = (diag == MagmaNonUnit);   // argument check guarantees MagmaUnit otherwise
    const bool seeded  = (flag != 0);              // x partially updated already?

    if (trans == MagmaNoTrans) {
        if (upper) {
            if (nonunit) { if (seeded) DTRSV_NOTRANS( 1, MagmaUpper, MagmaNonUnit ); else DTRSV_NOTRANS( 0, MagmaUpper, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_NOTRANS( 1, MagmaUpper, MagmaUnit );    else DTRSV_NOTRANS( 0, MagmaUpper, MagmaUnit );    }
        }
        else {
            if (nonunit) { if (seeded) DTRSV_NOTRANS( 1, MagmaLower, MagmaNonUnit ); else DTRSV_NOTRANS( 0, MagmaLower, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_NOTRANS( 1, MagmaLower, MagmaUnit );    else DTRSV_NOTRANS( 0, MagmaLower, MagmaUnit );    }
        }
    }
    else if (trans == MagmaTrans) {
        if (upper) {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaUpper, MagmaTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaUpper, MagmaTrans, MagmaUnit );    }
        }
        else {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaLower, MagmaTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaLower, MagmaTrans, MagmaUnit );    }
        }
    }
    else {  // MagmaConjTrans (only remaining value after the argument check)
        if (upper) {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaConjTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaUpper, MagmaConjTrans, MagmaUnit );    }
        }
        else {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaConjTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaLower, MagmaConjTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaConjTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaLower, MagmaConjTrans, MagmaUnit );    }
        }
    }

#undef DTRSV_NOTRANS
#undef DTRSV_TRANS
}
/******************************************************************************/
/*
README: flag decides if the dtrsv_outplace see an updated x or not. 0: No; other: Yes
In recursive, flag must be nonzero except the 1st call
*/
// Recursive (blocked) out-of-place triangular solve: x = op(A)^{-1} * b.
// The n-by-n problem is processed in panels of NB columns.  For each panel,
// a dgemv folds the already-solved part of x into the current panel's
// right-hand side, then magmablas_dtrsv_outofplace solves the jb-by-jb
// diagonal block.  The loop index `i` is passed as the flag argument: it is
// 0 only on the first panel, matching the README note above (the kernel
// must know whether x already carries partial updates).
extern "C" void
magmablas_dtrsv_recursive_outofplace(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaDouble_const_ptr A, magma_int_t lda,
    magmaDouble_ptr b, magma_int_t incb,
    magmaDouble_ptr x,
    magma_queue_t queue)
{
    /* Check arguments */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower ) {
        info = -1;
    } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
        info = -2;
    } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
        info = -3;
    } else if (n < 0) {
        info = -5;
    } else if (lda < max(1,n)) {
        info = -8;
    }
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return if possible.
    if (n == 0)
        return;
    //Init x with zero
    //magmablas_dlaset( MagmaFull, n, incb, MAGMA_D_ZERO, MAGMA_D_ZERO, x, n, queue );
    // `col` tracks the leading row/column index of the current diagonal block.
    magma_int_t col = n;
    if (trans == MagmaNoTrans)
    {
        for (magma_int_t i=0; i < n; i+= NB)
        {
            magma_int_t jb = min(NB, n-i);   // panel width (last panel may be short)
            if (uplo == MagmaUpper)
            {
                // Upper/NoTrans: sweep panels bottom-up; apply the already
                // solved trailing part of x to x[col..col+jb).
                col -= jb;
                //assume x_array contains zero elements, magmablas_dgemv will cause slow down
                magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, col+jb), lda,
                             x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            else
            {
                // Lower/NoTrans: sweep top-down; apply the leading solved part.
                col = i;
                magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, 0), lda,
                             x, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            // Solve the jb-by-jb diagonal block; flag = i (nonzero after panel 0).
            magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
        }
    }
    else
    {
        // Transposed cases.  This file is @generated from ztrsv (complex);
        // for real double precision MagmaConjTrans and MagmaTrans coincide,
        // so the gemv uses MagmaConjTrans for both.
        for (magma_int_t i=0; i < n; i += NB)
        {
            magma_int_t jb = min(NB, n-i);   // panel width (last panel may be short)
            if (uplo == MagmaLower)
            {
                // Lower/Trans acts like an upper solve: sweep bottom-up.
                col -= jb;
                magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            else
            {
                // Upper/Trans acts like a lower solve: sweep top-down.
                col = i;
                magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(0, col), lda, x, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            // Solve the jb-by-jb diagonal block; flag = i (nonzero after panel 0).
            magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
        }
    }
}
/***************************************************************************//**
Purpose
-------
    dtrsv solves the matrix equation on gpu

        op(A)*x = b,

    where x and b are n-element vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n N specifies the order of the matrix A. n >= 0.
@param[in]
dA DOUBLE PRECISION array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
    @param[in,out]
    db        DOUBLE PRECISION array of dimension n
              On entry, the right-hand side vector b.
              On exit, db is overwritten with the solution vector x.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
/*
    Solve op(A)*x = b on the GPU, overwriting db with the solution.
    Allocates a device workspace dx, zero-initializes it (the recursive
    solver accumulates gemv updates into it), runs the recursive
    out-of-place solver into dx, then copies dx back over db.
    See the documentation block above for the full argument description.

    NOTE(review): the workspace is sized n*incb, but dlaset/dlacpy below
    treat it as a contiguous n-vector, so incb != 1 looks unsupported
    here -- confirm against callers before relying on strided b.
*/
extern "C" void
magmablas_dtrsv(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaDouble_const_ptr dA, magma_int_t ldda,
    magmaDouble_ptr db, magma_int_t incb,
    magma_queue_t queue)
{
    // quick return: also avoids a zero-sized workspace allocation.
    if (n == 0)
        return;

    magma_int_t size_x = n * incb;
    magmaDouble_ptr dx = NULL;

    // Bug fix: the allocation result was previously ignored; on failure the
    // NULL workspace would have been passed to the kernels below.
    if ( MAGMA_SUCCESS != magma_dmalloc( &dx, size_x ) ) {
        magma_int_t info = MAGMA_ERR_DEVICE_ALLOC;
        magma_xerbla( __func__, -(info) );
        return;
    }

    // x must start at zero: the recursive solver accumulates into it.
    magmablas_dlaset( MagmaFull, n, 1, MAGMA_D_ZERO, MAGMA_D_ZERO, dx, n, queue );

    magmablas_dtrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );

    // Copy the solution back over the right-hand side.
    magmablas_dlacpy( MagmaFull, n, 1, dx, n, db, n, queue );
    magma_free( dx );
}
| e44cd7d87a12f38c3cb5c62108515594abe05abe.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv.cu, normal z -> d, Thu Oct 8 23:05:35 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_d
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "dtrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ double shared_data[];
/******************************************************************************/
// Single-block out-of-place triangular solve, non-transposed op(A) = A.
// Thin __global__ wrapper: the actual solver lives in dtrsv_notrans_device
// (dtrsv_template_device.cuh).  BLOCK_SIZE/DIM_X/DIM_Y/TILE_SIZE select the
// blocking configuration and uplo/trans/diag select the variant, all at
// compile time.  `flag` tells the device code whether x already contains
// partial updates from an earlier recursive step (0: no, nonzero: yes --
// see the README comment near magmablas_dtrsv_recursive_outofplace).
// The host wrapper launches this with one thread block and
// n*sizeof(double) bytes of dynamic shared memory (backs shared_data[]).
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
          const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
          const magma_trans_t trans, const magma_diag_t diag >
__global__ void
dtrsv_notrans_kernel_outplace(
    int n,                                   // order of the triangular matrix A
    const double * __restrict__ A, int lda,  // matrix and its leading dimension
    double *b, int incb,                     // right-hand side and its stride
    double *x)                               // solution vector (output)
{
    dtrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
// Single-block out-of-place triangular solve, transposed / conjugate-
// transposed op(A).  Thin __global__ wrapper: the actual solver lives in
// dtrsv_trans_device (dtrsv_template_device.cuh).  Template parameters pick
// the blocking configuration and the uplo/trans/diag variant at compile
// time.  `flag` tells the device code whether x already contains partial
// updates from an earlier recursive step (0: no, nonzero: yes -- see the
// README comment near magmablas_dtrsv_recursive_outofplace).
// The host wrapper launches this with one thread block and
// n*sizeof(double) bytes of dynamic shared memory (backs shared_data[]).
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
          const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
          const magma_trans_t trans, const magma_diag_t diag >
__global__ void
dtrsv_trans_kernel_outplace(
    int n,                                   // order of the triangular matrix A
    const double * __restrict__ A, int lda,  // matrix and its leading dimension
    double *b, int incb,                     // right-hand side and its stride
    double *x)                               // solution vector (output)
{
    dtrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
/*
    Dispatch a single-thread-block, out-of-place triangular solve on the GPU.

    The kernels are templated on (blocking sizes, flag, uplo, trans, diag),
    all of which must be compile-time constants, so this routine maps the
    runtime (uplo, trans, diag, flag) combination onto the matching
    instantiation.  `flag` says whether x already holds partial updates from
    a previous recursive step (0: no, nonzero: yes); see the README comment
    near magmablas_dtrsv_recursive_outofplace.

    The launch uses one thread block and n*sizeof(double) bytes of dynamic
    shared memory; the recursive driver calls this with n <= NB.
*/
extern "C" void
magmablas_dtrsv_outofplace(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaDouble_const_ptr A, magma_int_t lda,
    magmaDouble_ptr b, magma_int_t incb,
    magmaDouble_ptr x,
    magma_queue_t queue,
    magma_int_t flag=0)
{
    /* Check arguments (error codes follow magmablas_dtrsv's argument order) */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower )
        info = -1;
    else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
        info = -2;
    else if ( diag != MagmaUnit && diag != MagmaNonUnit )
        info = -3;
    else if ( n < 0 )
        info = -5;
    else if ( lda < max(1,n) )
        info = -8;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    // quick return if possible.
    if ( n == 0 )
        return;

    dim3 threads( NUM_THREADS );
    dim3 blocks( 1, 1, 1 );
    size_t shmem = n * sizeof(double);   // backs extern __shared__ shared_data[]

    // One launch per template instantiation; the macros only abbreviate the
    // launch boilerplate, they do not change which kernel runs.
#define DTRSV_NOTRANS( FLAG, UPLO, DIAG )                                                                              \
    dtrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, FLAG, UPLO, MagmaNoTrans, DIAG >  \
        <<< blocks, threads, shmem, queue->cuda_stream() >>>                                                           \
        (n, A, lda, b, incb, x)

#define DTRSV_TRANS( FLAG, UPLO, TRANS, DIAG )                                                                         \
    dtrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, FLAG, UPLO, TRANS, DIAG >           \
        <<< blocks, threads, shmem, queue->cuda_stream() >>>                                                           \
        (n, A, lda, b, incb, x)

    const bool upper   = (uplo == MagmaUpper);
    const bool nonunit = (diag == MagmaNonUnit);   // argument check guarantees MagmaUnit otherwise
    const bool seeded  = (flag != 0);              // x partially updated already?

    if (trans == MagmaNoTrans) {
        if (upper) {
            if (nonunit) { if (seeded) DTRSV_NOTRANS( 1, MagmaUpper, MagmaNonUnit ); else DTRSV_NOTRANS( 0, MagmaUpper, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_NOTRANS( 1, MagmaUpper, MagmaUnit );    else DTRSV_NOTRANS( 0, MagmaUpper, MagmaUnit );    }
        }
        else {
            if (nonunit) { if (seeded) DTRSV_NOTRANS( 1, MagmaLower, MagmaNonUnit ); else DTRSV_NOTRANS( 0, MagmaLower, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_NOTRANS( 1, MagmaLower, MagmaUnit );    else DTRSV_NOTRANS( 0, MagmaLower, MagmaUnit );    }
        }
    }
    else if (trans == MagmaTrans) {
        if (upper) {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaUpper, MagmaTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaUpper, MagmaTrans, MagmaUnit );    }
        }
        else {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaLower, MagmaTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaLower, MagmaTrans, MagmaUnit );    }
        }
    }
    else {  // MagmaConjTrans (only remaining value after the argument check)
        if (upper) {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaUpper, MagmaConjTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaUpper, MagmaConjTrans, MagmaUnit );    }
        }
        else {
            if (nonunit) { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaConjTrans, MagmaNonUnit ); else DTRSV_TRANS( 0, MagmaLower, MagmaConjTrans, MagmaNonUnit ); }
            else         { if (seeded) DTRSV_TRANS( 1, MagmaLower, MagmaConjTrans, MagmaUnit );    else DTRSV_TRANS( 0, MagmaLower, MagmaConjTrans, MagmaUnit );    }
        }
    }

#undef DTRSV_NOTRANS
#undef DTRSV_TRANS
}
/******************************************************************************/
/*
README: flag decides if the dtrsv_outplace see an updated x or not. 0: No; other: Yes
In recursive, flag must be nonzero except the 1st call
*/
// Recursive (blocked) out-of-place triangular solve: x = op(A)^{-1} * b.
// The n-by-n problem is processed in panels of NB columns.  For each panel,
// a dgemv folds the already-solved part of x into the current panel's
// right-hand side, then magmablas_dtrsv_outofplace solves the jb-by-jb
// diagonal block.  The loop index `i` is passed as the flag argument: it is
// 0 only on the first panel, matching the README note above (the kernel
// must know whether x already carries partial updates).
extern "C" void
magmablas_dtrsv_recursive_outofplace(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
    magma_int_t n,
    magmaDouble_const_ptr A, magma_int_t lda,
    magmaDouble_ptr b, magma_int_t incb,
    magmaDouble_ptr x,
    magma_queue_t queue)
{
    /* Check arguments */
    magma_int_t info = 0;
    if ( uplo != MagmaUpper && uplo != MagmaLower ) {
        info = -1;
    } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
        info = -2;
    } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
        info = -3;
    } else if (n < 0) {
        info = -5;
    } else if (lda < max(1,n)) {
        info = -8;
    }
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return if possible.
    if (n == 0)
        return;
    //Init x with zero
    //magmablas_dlaset( MagmaFull, n, incb, MAGMA_D_ZERO, MAGMA_D_ZERO, x, n, queue );
    // `col` tracks the leading row/column index of the current diagonal block.
    magma_int_t col = n;
    if (trans == MagmaNoTrans)
    {
        for (magma_int_t i=0; i < n; i+= NB)
        {
            magma_int_t jb = min(NB, n-i);   // panel width (last panel may be short)
            if (uplo == MagmaUpper)
            {
                // Upper/NoTrans: sweep panels bottom-up; apply the already
                // solved trailing part of x to x[col..col+jb).
                col -= jb;
                //assume x_array contains zero elements, magmablas_dgemv will cause slow down
                magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, col+jb), lda,
                             x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            else
            {
                // Lower/NoTrans: sweep top-down; apply the leading solved part.
                col = i;
                magma_dgemv( MagmaNoTrans, jb, i, MAGMA_D_ONE, A(col, 0), lda,
                             x, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            // Solve the jb-by-jb diagonal block; flag = i (nonzero after panel 0).
            magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
        }
    }
    else
    {
        // Transposed cases.  This file is @generated from ztrsv (complex);
        // for real double precision MagmaConjTrans and MagmaTrans coincide,
        // so the gemv uses MagmaConjTrans for both.
        for (magma_int_t i=0; i < n; i += NB)
        {
            magma_int_t jb = min(NB, n-i);   // panel width (last panel may be short)
            if (uplo == MagmaLower)
            {
                // Lower/Trans acts like an upper solve: sweep bottom-up.
                col -= jb;
                magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            else
            {
                // Upper/Trans acts like a lower solve: sweep top-down.
                col = i;
                magma_dgemv( MagmaConjTrans, i, jb, MAGMA_D_ONE, A(0, col), lda, x, 1, MAGMA_D_ONE, x+col, 1, queue );
            }
            // Solve the jb-by-jb diagonal block; flag = i (nonzero after panel 0).
            magmablas_dtrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
        }
    }
}
/***************************************************************************//**
Purpose
-------
dtrsv solves the matrix equation on gpu
op(A)*x = b,
where x and b are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n N specifies the order of the matrix A. n >= 0.
@param[in]
dA DOUBLE PRECISION array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db DOUBLE PRECISION array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
extern "C" void
magmablas_dtrsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr db, magma_int_t incb,
magma_queue_t queue)
{
// Solves op(A)*x = b on the GPU in double precision and overwrites db with
// the solution: allocates a device workspace, zeroes it (the recursive
// solver requires x to start as all zeros), runs the recursive out-of-place
// solve, copies the result back over db, and frees the workspace.
// quick return; also avoids a zero-sized workspace allocation below.
if ( n == 0 )
return;
magma_int_t size_x = n * incb;
magmaDouble_ptr dx = NULL;
// Fixed: the allocation result was previously ignored, so an out-of-memory
// condition left dx == NULL and the calls below dereferenced it.
if ( MAGMA_SUCCESS != magma_dmalloc( &dx, size_x ) ) {
magma_int_t info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return;
}
magmablas_dlaset( MagmaFull, n, 1, MAGMA_D_ZERO, MAGMA_D_ZERO, dx, n, queue );
magmablas_dtrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
// copy the solution back into b
magmablas_dlacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
|
7fd48d7a703691a9b8dede2bfe3627143cdb98e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
constexpr size_t kSize = 1000000;  // floats per buffer: ~3.8 MiB per transfer
// RAII wall-clock timer: captures the construction time and, on destruction,
// prints the elapsed time in microseconds to stdout (format: "<N> us\n").
class Stopwatch {
public:
    using TimePoint = decltype(std::chrono::high_resolution_clock::now());

    Stopwatch() : begin_(std::chrono::high_resolution_clock::now()) {}

    ~Stopwatch() {
        const TimePoint now = std::chrono::high_resolution_clock::now();
        const auto elapsed =
            std::chrono::duration_cast<std::chrono::microseconds>(now - begin_);
        std::cout << elapsed.count() << " us\n";
    }

private:
    TimePoint begin_;
};
void Transfer0(float* orig, float* target0, float* target1, float* target2) {
hipMemcpy(target0, orig, kSize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(target1, orig, kSize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(target2, orig, kSize * sizeof(float), hipMemcpyHostToDevice);
};
void Transfer1(float* orig, float* target0, float* target1, float* target2) {
hipStream_t stream[3];
float* targets[3] = {target0, target1, target2};
for (int i = 0; i < 3; ++i) {
hipStreamCreate(&stream[i]);
}
for (int i = 0; i < 3; ++i) {
hipMemcpyAsync(targets[i], orig, kSize * sizeof(float), hipMemcpyHostToDevice, stream[i]);
}
for (int i = 0; i < 3; ++i) {
hipStreamDestroy(stream[i]);
}
};
int main() {
float* original = new float[kSize];
float* target0, *target1, *target2;
hipMalloc(&target0, sizeof(float) * kSize);
hipMalloc(&target1, sizeof(float) * kSize);
hipMalloc(&target2, sizeof(float) * kSize);
{
Stopwatch s;
Transfer0(original, target0, target1, target2);
}
{
Stopwatch s;
Transfer1(original, target0, target1, target2);
}
hipFree(target0);
hipFree(target1);
hipFree(target2);
return 0;
} | 7fd48d7a703691a9b8dede2bfe3627143cdb98e0.cu | #include <iostream>
#include <chrono>
constexpr size_t kSize = 1000000;
class Stopwatch {
public:
using TimePoint = decltype(std::chrono::high_resolution_clock::now());
Stopwatch(): start(std::chrono::high_resolution_clock::now()) {}
~Stopwatch() {
end = std::chrono::high_resolution_clock::now();
std::cout << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()
<< " us\n";
}
private:
TimePoint start;
TimePoint end;
};
void Transfer0(float* orig, float* target0, float* target1, float* target2) {
cudaMemcpy(target0, orig, kSize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(target1, orig, kSize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(target2, orig, kSize * sizeof(float), cudaMemcpyHostToDevice);
};
void Transfer1(float* orig, float* target0, float* target1, float* target2) {
cudaStream_t stream[3];
float* targets[3] = {target0, target1, target2};
for (int i = 0; i < 3; ++i) {
cudaStreamCreate(&stream[i]);
}
for (int i = 0; i < 3; ++i) {
cudaMemcpyAsync(targets[i], orig, kSize * sizeof(float), cudaMemcpyHostToDevice, stream[i]);
}
for (int i = 0; i < 3; ++i) {
cudaStreamDestroy(stream[i]);
}
};
int main() {
float* original = new float[kSize];
float* target0, *target1, *target2;
cudaMalloc(&target0, sizeof(float) * kSize);
cudaMalloc(&target1, sizeof(float) * kSize);
cudaMalloc(&target2, sizeof(float) * kSize);
{
Stopwatch s;
Transfer0(original, target0, target1, target2);
}
{
Stopwatch s;
Transfer1(original, target0, target1, target2);
}
cudaFree(target0);
cudaFree(target1);
cudaFree(target2);
return 0;
} |
696e587b8fb89a5d5ef1808817c015f4dfe6e9fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
// Dataset extents for the two kernel arguments, cached in device __constant__
// memory. The host-side *_h shadow copies record the last values uploaded so
// the host stub only re-issues hipMemcpyToSymbol when a dimension actually
// changes (-1 means "never uploaded").
__constant__ int xdim0_advec_mom_kernel_mass_flux_x;
int xdim0_advec_mom_kernel_mass_flux_x_h = -1;
__constant__ int ydim0_advec_mom_kernel_mass_flux_x;
int ydim0_advec_mom_kernel_mass_flux_x_h = -1;
__constant__ int xdim1_advec_mom_kernel_mass_flux_x;
int xdim1_advec_mom_kernel_mass_flux_x_h = -1;
__constant__ int ydim1_advec_mom_kernel_mass_flux_x;
int ydim1_advec_mom_kernel_mass_flux_x_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
// Relative 3-D indexing macros for argument 0 / argument 1: map an (x, y, z)
// stencil offset to a flat element offset using that argument's own x/y
// extents (x varies fastest).
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel_mass_flux_x * (y) + \
xdim0_advec_mom_kernel_mass_flux_x * ydim0_advec_mom_kernel_mass_flux_x * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel_mass_flux_x * (y) + \
xdim1_advec_mom_kernel_mass_flux_x * ydim1_advec_mom_kernel_mass_flux_x * \
(z))
// user function
// User function: writes 0.125 x the sum of eight neighbouring mass_flux_x
// samples (offsets x in {0,1}, y in {-1,0}, z in {-1,0}) into node_flux --
// i.e. the average of the surrounding 2x2x2 stencil.
__device__ inline void advec_mom_kernel_mass_flux_x_gpu(
    double *node_flux, const double *mass_flux_x) {
  // Accumulate strictly left-to-right so the FP result is bit-identical to
  // the original single-expression form.
  double sum = mass_flux_x[OPS_ACC1(0, -1, 0)];
  sum += mass_flux_x[OPS_ACC1(0, 0, 0)];
  sum += mass_flux_x[OPS_ACC1(1, -1, 0)];
  sum += mass_flux_x[OPS_ACC1(1, 0, 0)];
  sum += mass_flux_x[OPS_ACC1(0, -1, -1)];
  sum += mass_flux_x[OPS_ACC1(0, 0, -1)];
  sum += mass_flux_x[OPS_ACC1(1, -1, -1)];
  sum += mass_flux_x[OPS_ACC1(1, 0, -1)];
  node_flux[OPS_ACC0(0, 0, 0)] = 0.125 * sum;
}
#undef OPS_ACC0
#undef OPS_ACC1
// CUDA/HIP wrapper: one thread per (x, y, z) grid point. Offsets the two base
// pointers to this thread's element and invokes the user function; threads
// outside [0,size0) x [0,size1) x [0,size2) do nothing.
__global__ void ops_advec_mom_kernel_mass_flux_x(double *__restrict arg0,
                                                 const double *__restrict arg1,
                                                 int size0, int size1,
                                                 int size2) {
  const int x = blockDim.x * blockIdx.x + threadIdx.x;
  const int y = blockDim.y * blockIdx.y + threadIdx.y;
  const int z = blockDim.z * blockIdx.z + threadIdx.z;
  if (x >= size0 || y >= size1 || z >= size2)
    return;
  double *p0 =
      arg0 + x + y * xdim0_advec_mom_kernel_mass_flux_x +
      z * xdim0_advec_mom_kernel_mass_flux_x * ydim0_advec_mom_kernel_mass_flux_x;
  const double *p1 =
      arg1 + x + y * xdim1_advec_mom_kernel_mass_flux_x +
      z * xdim1_advec_mom_kernel_mass_flux_x * ydim1_advec_mom_kernel_mass_flux_x;
  advec_mom_kernel_mass_flux_x_gpu(p0, p1);
}
// host stub function
// Host stub: sets up and launches ops_advec_mom_kernel_mass_flux_x over the
// index box in `range` (OPS kernel id 126). In an OPS_LAZY build this body is
// the deferred `_execute` function invoked from a queued descriptor;
// otherwise it is the user-facing entry point itself.
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_x(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1) {
#else
void ops_par_loop_advec_mom_kernel_mass_flux_x_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 2, range, 126))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(126, "advec_mom_kernel_mass_flux_x");
OPS_kernels[126].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// Under MPI, clip the global iteration range to the portion owned by this
// rank (decomp_disp/decomp_size), extending past the owned region only at
// physical domain boundaries (MPI_PROC_NULL neighbours).
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
// Upload dat extents to __constant__ memory only when they differ from the
// values cached in the *_h shadows (avoids a symbol copy per launch).
if (xdim0 != xdim0_advec_mom_kernel_mass_flux_x_h ||
ydim0 != ydim0_advec_mom_kernel_mass_flux_x_h ||
xdim1 != xdim1_advec_mom_kernel_mass_flux_x_h ||
ydim1 != ydim1_advec_mom_kernel_mass_flux_x_h) {
hipMemcpyToSymbol(xdim0_advec_mom_kernel_mass_flux_x, &xdim0, sizeof(int));
xdim0_advec_mom_kernel_mass_flux_x_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_mom_kernel_mass_flux_x, &ydim0, sizeof(int));
ydim0_advec_mom_kernel_mass_flux_x_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_mom_kernel_mass_flux_x, &xdim1, sizeof(int));
xdim1_advec_mom_kernel_mass_flux_x_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_mom_kernel_mass_flux_x, &ydim1, sizeof(int));
ydim1_advec_mom_kernel_mass_flux_x_h = ydim1;
}
// 2-D thread blocks tiling x/y; one grid layer per z plane.
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
// set up initial pointers
// (byte offset of element (start[0], start[1], start[2]) of each dat,
// honouring that dat's stencil strides)
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[126].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_mom_kernel_mass_flux_x), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
// synchronize only when diagnostics are enabled so the kernel time
// accumulated below is accurate
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[126].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[126].mpi_time += t2 - t1;
OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
// Lazy-build entry point: instead of executing immediately, package the call
// (name, block, range, args) into an ops_kernel_descriptor -- including a
// rolling djb2-style hash identifying this particular invocation -- and
// enqueue it for deferred execution via the `_execute` function.
void ops_par_loop_advec_mom_kernel_mass_flux_x(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 126;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 126;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_mass_flux_x_execute;
if (OPS_diags > 1) {
ops_timing_realloc(126, "advec_mom_kernel_mass_flux_x");
}
ops_enqueue_kernel(desc);
}
#endif
| 696e587b8fb89a5d5ef1808817c015f4dfe6e9fb.cu | //
// auto-generated by ops.py
//
// Dataset extents for the two kernel arguments, cached in device __constant__
// memory. The host-side *_h shadow copies record the last values uploaded so
// the host stub only re-issues cudaMemcpyToSymbol when a dimension actually
// changes (-1 means "never uploaded").
__constant__ int xdim0_advec_mom_kernel_mass_flux_x;
int xdim0_advec_mom_kernel_mass_flux_x_h = -1;
__constant__ int ydim0_advec_mom_kernel_mass_flux_x;
int ydim0_advec_mom_kernel_mass_flux_x_h = -1;
__constant__ int xdim1_advec_mom_kernel_mass_flux_x;
int xdim1_advec_mom_kernel_mass_flux_x_h = -1;
__constant__ int ydim1_advec_mom_kernel_mass_flux_x;
int ydim1_advec_mom_kernel_mass_flux_x_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
// Relative 3-D indexing macros for argument 0 / argument 1: map an (x, y, z)
// stencil offset to a flat element offset using that argument's own x/y
// extents (x varies fastest).
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel_mass_flux_x * (y) + \
xdim0_advec_mom_kernel_mass_flux_x * ydim0_advec_mom_kernel_mass_flux_x * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel_mass_flux_x * (y) + \
xdim1_advec_mom_kernel_mass_flux_x * ydim1_advec_mom_kernel_mass_flux_x * \
(z))
// user function
// User function: writes 0.125 x the sum of eight neighbouring mass_flux_x
// samples (offsets x in {0,1}, y in {-1,0}, z in {-1,0}) into node_flux --
// i.e. the average of the surrounding 2x2x2 stencil.
__device__ inline void advec_mom_kernel_mass_flux_x_gpu(
    double *node_flux, const double *mass_flux_x) {
  // Accumulate strictly left-to-right so the FP result is bit-identical to
  // the original single-expression form.
  double sum = mass_flux_x[OPS_ACC1(0, -1, 0)];
  sum += mass_flux_x[OPS_ACC1(0, 0, 0)];
  sum += mass_flux_x[OPS_ACC1(1, -1, 0)];
  sum += mass_flux_x[OPS_ACC1(1, 0, 0)];
  sum += mass_flux_x[OPS_ACC1(0, -1, -1)];
  sum += mass_flux_x[OPS_ACC1(0, 0, -1)];
  sum += mass_flux_x[OPS_ACC1(1, -1, -1)];
  sum += mass_flux_x[OPS_ACC1(1, 0, -1)];
  node_flux[OPS_ACC0(0, 0, 0)] = 0.125 * sum;
}
#undef OPS_ACC0
#undef OPS_ACC1
// CUDA wrapper: one thread per (x, y, z) grid point. Offsets the two base
// pointers to this thread's element and invokes the user function; threads
// outside [0,size0) x [0,size1) x [0,size2) do nothing.
__global__ void ops_advec_mom_kernel_mass_flux_x(double *__restrict arg0,
                                                 const double *__restrict arg1,
                                                 int size0, int size1,
                                                 int size2) {
  const int x = blockDim.x * blockIdx.x + threadIdx.x;
  const int y = blockDim.y * blockIdx.y + threadIdx.y;
  const int z = blockDim.z * blockIdx.z + threadIdx.z;
  if (x >= size0 || y >= size1 || z >= size2)
    return;
  double *p0 =
      arg0 + x + y * xdim0_advec_mom_kernel_mass_flux_x +
      z * xdim0_advec_mom_kernel_mass_flux_x * ydim0_advec_mom_kernel_mass_flux_x;
  const double *p1 =
      arg1 + x + y * xdim1_advec_mom_kernel_mass_flux_x +
      z * xdim1_advec_mom_kernel_mass_flux_x * ydim1_advec_mom_kernel_mass_flux_x;
  advec_mom_kernel_mass_flux_x_gpu(p0, p1);
}
// host stub function
// Host stub: sets up and launches ops_advec_mom_kernel_mass_flux_x over the
// index box in `range` (OPS kernel id 126). In an OPS_LAZY build this body is
// the deferred `_execute` function invoked from a queued descriptor;
// otherwise it is the user-facing entry point itself.
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_x(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1) {
#else
void ops_par_loop_advec_mom_kernel_mass_flux_x_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 2, range, 126))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(126, "advec_mom_kernel_mass_flux_x");
OPS_kernels[126].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// Under MPI, clip the global iteration range to the portion owned by this
// rank (decomp_disp/decomp_size), extending past the owned region only at
// physical domain boundaries (MPI_PROC_NULL neighbours).
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
// Upload dat extents to __constant__ memory only when they differ from the
// values cached in the *_h shadows (avoids a symbol copy per launch).
if (xdim0 != xdim0_advec_mom_kernel_mass_flux_x_h ||
ydim0 != ydim0_advec_mom_kernel_mass_flux_x_h ||
xdim1 != xdim1_advec_mom_kernel_mass_flux_x_h ||
ydim1 != ydim1_advec_mom_kernel_mass_flux_x_h) {
cudaMemcpyToSymbol(xdim0_advec_mom_kernel_mass_flux_x, &xdim0, sizeof(int));
xdim0_advec_mom_kernel_mass_flux_x_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_mom_kernel_mass_flux_x, &ydim0, sizeof(int));
ydim0_advec_mom_kernel_mass_flux_x_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_mom_kernel_mass_flux_x, &xdim1, sizeof(int));
xdim1_advec_mom_kernel_mass_flux_x_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_mom_kernel_mass_flux_x, &ydim1, sizeof(int));
ydim1_advec_mom_kernel_mass_flux_x_h = ydim1;
}
// 2-D thread blocks tiling x/y; one grid layer per z plane.
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
// set up initial pointers
// (byte offset of element (start[0], start[1], start[2]) of each dat,
// honouring that dat's stencil strides)
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[126].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_mom_kernel_mass_flux_x<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
// synchronize only when diagnostics are enabled so the kernel time
// accumulated below is accurate
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[126].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[126].mpi_time += t2 - t1;
OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[126].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
// Lazy-build entry point: instead of executing immediately, package the call
// (name, block, range, args) into an ops_kernel_descriptor -- including a
// rolling djb2-style hash identifying this particular invocation -- and
// enqueue it for deferred execution via the `_execute` function.
void ops_par_loop_advec_mom_kernel_mass_flux_x(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 126;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 126;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_mass_flux_x_execute;
if (OPS_diags > 1) {
ops_timing_realloc(126, "advec_mom_kernel_mass_flux_x");
}
ops_enqueue_kernel(desc);
}
#endif
|
f607d5b1e480753d49f28e31589712e5759352ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
__global__ void initObject(ChiVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col, int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
//for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(ChiVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
*/
#include "parse_oo.h"
// Initializes the shared GraphChiContext: the iteration counter starts at
// zero and the graph's vertex/edge totals are recorded for later phases.
void initContext(GraphChiContext *context, int vertices, int edges) {
    context->setNumIterations(0);
    context->setNumVertices(vertices);
    context->setNumEdges(edges);
}
// Phase 0 of vertex construction: reserve one ChiVertex<int, int> slot per
// vertex from the custom allocator's calloc. Only the allocation happens
// here; the objects are filled in by the later init phases.
void part0_initObject(ChiVertex<int, int> **vertex, GraphChiContext *context,
                      int *row, int *col, int *inrow, int *incol,
                      obj_alloc *alloc) {
    const int numVertices = context->getNumVertices();
    for (int v = 0; v < numVertices; ++v) {
        vertex[v] = (ChiVertex<int, int> *)alloc->calloc<ChiVertex<int, int>>(1);
    }
}
// Phase 1 of vertex construction: hand the allocator to every vertex via
// set_in_out() -- presumably so each vertex can set up its in/out edge
// storage (the commented-out code this replaced allocated inEdgeDataArray /
// outEdgeDataArray from `alloc`). Degree counts and edge contents are
// filled in by the kernel-side phases.
void part1_initObject(ChiVertex<int, int> **vertex, GraphChiContext *context,
                      int *row, int *col, int *inrow, int *incol,
                      obj_alloc *alloc) {
    const int numVertices = context->getNumVertices();
    for (int v = 0; v < numVertices; ++v) {
        vertex[v]->set_in_out(alloc);
    }
}
// Kernel phase 0: one thread per vertex. Placement-constructs each
// pre-allocated ChiVertex with its id, in-degree, and out-degree, derived
// from the CSR-style offset arrays (`row` for out-edges, `inrow` for
// in-edges); the last vertex's ranges end at numEdges.
__global__ void part_kern0_initObject(ChiVertex<int, int> **vertex,
                                      GraphChiContext *context, int *row,
                                      int *col, int *inrow, int *incol) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int numVertices = context->getNumVertices();
    if (tid >= numVertices)
        return;
    const bool isLast = (tid + 1 >= numVertices);
    const int outDegree =
        (isLast ? context->getNumEdges() : row[tid + 1]) - row[tid];
    const int inDegree =
        (isLast ? context->getNumEdges() : inrow[tid + 1]) - inrow[tid];
    new (vertex[tid]) ChiVertex<int, int>(tid, inDegree, outDegree);
}
// Kernel phase 1: populate each vertex's in-edge array. In-edge i of vertex
// tid points at vertex incol[inrow[tid] + i] with an initial edge value of 0;
// the in-edge range for vertex tid is [inrow[tid], inrow[tid+1]), with the
// last vertex's range ending at numEdges. Out-edges are wired separately
// (initOutEdge / kern_initOutEdge).
// Fixed: removed the dead `out_end` computation that read `row` but whose
// result was never used (its only consumer was commented-out code). The
// `row` / `col` parameters are kept so existing launch sites still compile.
__global__ void part_kern1_initObject(ChiVertex<int, int> **vertex,
                                      GraphChiContext *context, int *row,
                                      int *col, int *inrow, int *incol) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < context->getNumVertices()) {
        int in_start = inrow[tid];
        int in_end;
        if (tid + 1 < context->getNumVertices()) {
            in_end = inrow[tid + 1];
        } else {
            in_end = context->getNumEdges();
        }
        for (int i = in_start; i < in_end; i++) {
            vertex[tid]->setInEdge(i - in_start, incol[i], 0);
        }
    }
}
// Host-side out-edge wiring: for each vertex v, out-edge i targets vertex
// col[row[v] + i] with an initial value of 0. The full vertex array and v
// are forwarded to setOutEdge as extra context for its implementation.
void initOutEdge(ChiVertex<int, int> **vertex, GraphChiContext *context,
                 int *row, int *col) {
    const int numVertices = context->getNumVertices();
    const int numEdges = context->getNumEdges();
    for (int v = 0; v < numVertices; ++v) {
        const int first = row[v];
        const int limit = (v + 1 < numVertices) ? row[v + 1] : numEdges;
        for (int e = first; e < limit; ++e) {
            vertex[v]->setOutEdge(vertex, v, e - first, col[e], 0);
        }
    }
}
// Monolithic device initializer (array-of-objects variant, note `vertex` is
// ChiVertex* here, not ChiVertex**): one thread per vertex. Placement-
// constructs vertex tid with its in/out degrees from the CSR-style offset
// arrays, seeds its value with INT_MAX, and fills its in-edges (source
// incol[i], initial edge value INT_MAX). Out-edge wiring is left to the
// separate out-edge kernels.
__global__ void kern_initObject(ChiVertex<int, int> *vertex,
                                GraphChiContext *context, int *row, int *col,
                                int *inrow, int *incol) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int numVertices = context->getNumVertices();
    if (tid >= numVertices)
        return;
    const bool isLast = (tid + 1 >= numVertices);
    const int outDegree =
        (isLast ? context->getNumEdges() : row[tid + 1]) - row[tid];
    const int inStart = inrow[tid];
    const int inEnd = isLast ? context->getNumEdges() : inrow[tid + 1];
    new (&vertex[tid]) ChiVertex<int, int>(tid, inEnd - inStart, outDegree);
    vertex[tid].setValue(INT_MAX);
    for (int i = inStart; i < inEnd; i++) {
        vertex[tid].setInEdge(i - inStart, incol[i], INT_MAX);
    }
}
// Device-side out-edge wiring: one thread per vertex; out-edge i of vertex
// tid targets vertex col[row[tid] + i] with an initial value of 0 (the last
// vertex's range ends at numEdges).
__global__ void kern_initOutEdge(ChiVertex<int, int> **vertex,
                                 GraphChiContext *context, int *row, int *col) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int numVertices = context->getNumVertices();
    if (tid >= numVertices)
        return;
    const int first = row[tid];
    const int limit =
        (tid + 1 < numVertices) ? row[tid + 1] : context->getNumEdges();
    for (int e = first; e < limit; ++e) {
        vertex[tid]->setOutEdge(vertex, tid, e - first, col[e], 0);
    }
}
// Globals in unified (__managed__) memory, visible to both host and device.
__managed__ obj_info_tuble *vfun_table; // per-type vtable lookup table consumed by get_vfunc_type()
__managed__ unsigned tree_size_g; // NOTE(review): not referenced in the code visible in this file
__managed__ void *temp_copyBack; // NOTE(review): not referenced in the code visible in this file
__managed__ void *temp_TP; // written with the vtable slot just before each virtual-style call below; exact purpose not visible here -- confirm before removing
// One iteration of label-propagation connected components, one thread per
// vertex. Iteration 0 seeds every vertex's label with its own id and reads
// neighbour ids straight from the edges; later iterations read the labels
// stored on the edges. Each vertex adopts the minimum label it can see and
// then writes it back onto its edges for the next round.
// Edge objects are accessed through a software vtable: get_vfunc_type()
// fetches the vtable for the object and CLEANPTR strips the type tag from
// the pointer before the call. The stores to the __managed__ temp_TP mirror
// the vtable slot about to be used (slot 0 pairs with getVertexId, 1 with
// getValue, 2 with setValue) -- NOTE(review): why the slot is published via
// temp_TP is not visible in this file; confirm before changing it.
__global__ void ConnectedComponent(ChiVertex<int, int> **vertex,
GraphChiContext *context, int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **vtable;
if (tid < context->getNumVertices()) {
int numEdges;
numEdges = vertex[tid]->numEdges();
if (iteration == 0) {
// first round: my label starts as my own vertex id
int vid = vertex[tid]->getId();
vertex[tid]->setValue(vid);
}
int curMin;
curMin = vertex[tid]->getValue();
// scan all edges for the smallest visible label
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int nbLabel;
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[1];
nbLabel = CLEANPTR(edge, ChiEdge<int> *)->getValue();
if (iteration == 0) {
// edges carry no labels yet; use the neighbour's id instead
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[0];
nbLabel =
CLEANPTR(edge, ChiEdge<int> *)->getVertexId(); // Note!
}
if (nbLabel < curMin) {
curMin = nbLabel;
}
}
/**
* Set my new label
*/
vertex[tid]->setValue(curMin);
int label = curMin;
/**
* Broadcast my value to neighbors by writing the value to my edges.
*/
if (iteration > 0) {
// only ever lower the label stored on an edge, never raise it
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int edgeValue;
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[1];
edgeValue = CLEANPTR(edge, ChiEdge<int> *)->getValue();
if (edgeValue > label) {
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[2];
CLEANPTR(edge, ChiEdge<int> *)->setValue(label);
}
}
} else {
// Special case for first iteration to avoid overwriting
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
vtable = get_vfunc_type(outEdge, vfun_table);
temp_TP = vtable[2];
CLEANPTR(outEdge, ChiEdge<int> *)->setValue(label);
}
}
}
}
// Copies each vertex's final label into the plain int output array `cc`
// (one thread per vertex).
__global__ void copyBack(ChiVertex<int, int> **vertex, GraphChiContext *context,
                         int *cc) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= context->getNumVertices())
        return;
    cc[tid] = vertex[tid]->getValue();
}
| f607d5b1e480753d49f28e31589712e5759352ad.cu | /*
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
__global__ void initObject(ChiVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col, int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
//for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(ChiVertex<int, int> **vertex, GraphChiContext*
context,
int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
*/
#include "parse_oo.h"
//! Host-side initialization of the shared graph context.
/*!
 * \param context  Context object to populate
 * \param vertices Number of vertices in the graph
 * \param edges    Number of edges in the graph
 */
void initContext(GraphChiContext *context, int vertices, int edges) {
    context->setNumIterations(0);
    context->setNumVertices(vertices);
    context->setNumEdges(edges);
}
//! Host pass 0: reserve zero-initialized storage for every vertex object.
/*!
 * Only allocation happens here; construction and edge wiring are performed by
 * the later init passes. The CSR arrays (row/col/inrow/incol) are unused in
 * this pass but kept in the signature for parity with the other passes.
 */
void part0_initObject(ChiVertex<int, int> **vertex, GraphChiContext *context,
                      int *row, int *col, int *inrow, int *incol,
                      obj_alloc *alloc) {
    const int nverts = context->getNumVertices();
    for (int v = 0; v < nverts; ++v) {
        vertex[v] =
            (ChiVertex<int, int> *)alloc->calloc<ChiVertex<int, int>>(1);
    }
}
//! Host pass 1: allocate the in/out edge arrays inside each vertex object.
/*!
 * Per-vertex storage setup is delegated to ChiVertex::set_in_out(). The CSR
 * arrays are not consumed here; degrees were already derived when the
 * vertices were constructed (see part_kern0_initObject).
 */
void part1_initObject(ChiVertex<int, int> **vertex, GraphChiContext *context,
                      int *row, int *col, int *inrow, int *incol,
                      obj_alloc *alloc) {
    const int nverts = context->getNumVertices();
    for (int v = 0; v < nverts; ++v) {
        vertex[v]->set_in_out(alloc);
    }
}
//! Device pass 0: placement-construct each ChiVertex with its degrees.
/*!
 * One thread per vertex. The CSR row offsets give the out-edge range and the
 * inrow offsets give the in-edge range; the final vertex's ranges are closed
 * by the total edge count. col/incol are unused here (edges are wired by the
 * later passes).
 */
__global__ void part_kern0_initObject(ChiVertex<int, int> **vertex,
                                      GraphChiContext *context, int *row,
                                      int *col, int *inrow, int *incol) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nverts = context->getNumVertices();
    if (tid >= nverts)
        return;

    const bool is_last = (tid + 1 >= nverts);
    const int out_start = row[tid];
    const int out_end = is_last ? context->getNumEdges() : row[tid + 1];
    const int in_start = inrow[tid];
    const int in_end = is_last ? context->getNumEdges() : inrow[tid + 1];

    // construct in the slot preallocated by part0_initObject
    new (vertex[tid])
        ChiVertex<int, int>(tid, in_end - in_start, out_end - out_start);
}
//! Device pass 1: populate each vertex's in-edge list from the CSR arrays.
/*!
 * One thread per vertex; thread tid copies its slice of incol into the
 * vertex's in-edge array with an initial edge value of 0.
 *
 * \param vertex  Array of constructed vertex objects (see part_kern0_initObject)
 * \param context Graph context holding the vertex/edge counts
 * \param row     CSR out-edge offsets (unused here, kept for signature parity)
 * \param col     CSR out-edge targets (unused here, kept for signature parity)
 * \param inrow   CSR in-edge offsets
 * \param incol   CSR in-edge sources
 *
 * Fix: the original also computed the out-edge upper bound (an extra global
 * read of row[tid+1]) but never used it; that dead load has been removed.
 */
__global__ void part_kern1_initObject(ChiVertex<int, int> **vertex,
                                      GraphChiContext *context, int *row,
                                      int *col, int *inrow, int *incol) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < context->getNumVertices()) {
        int in_start = inrow[tid];
        int in_end;
        if (tid + 1 < context->getNumVertices()) {
            in_end = inrow[tid + 1];
        } else {
            in_end = context->getNumEdges();
        }
        for (int i = in_start; i < in_end; i++) {
            vertex[tid]->setInEdge(i - in_start, incol[i], 0);
        }
    }
}
//! Host-side wiring of out-edges from the CSR representation.
/*!
 * For each vertex v, targets col[row[v]..row[v+1]) become v's out-edges with
 * initial value 0; the last vertex's range ends at the total edge count.
 */
void initOutEdge(ChiVertex<int, int> **vertex, GraphChiContext *context,
                 int *row, int *col) {
    const int nverts = context->getNumVertices();
    const int nedges = context->getNumEdges();
    for (int v = 0; v < nverts; ++v) {
        const int first = row[v];
        const int last = (v + 1 < nverts) ? row[v + 1] : nedges;
        for (int e = first; e < last; ++e) {
            vertex[v]->setOutEdge(vertex, v, e - first, col[e], 0);
        }
    }
}
//! Device initialization for the array-of-objects vertex layout.
/*!
 * One thread per vertex: placement-constructs the vertex with its in/out
 * degrees, seeds its value with INT_MAX, and fills its in-edges (also with
 * value INT_MAX). Out-edges are wired separately by kern_initOutEdge.
 */
__global__ void kern_initObject(ChiVertex<int, int> *vertex,
                                GraphChiContext *context, int *row, int *col,
                                int *inrow, int *incol) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int nverts = context->getNumVertices();
    if (tid >= nverts)
        return;

    const bool is_last = (tid + 1 >= nverts);
    const int out_start = row[tid];
    const int out_end = is_last ? context->getNumEdges() : row[tid + 1];
    const int in_start = inrow[tid];
    const int in_end = is_last ? context->getNumEdges() : inrow[tid + 1];

    new (&vertex[tid])
        ChiVertex<int, int>(tid, in_end - in_start, out_end - out_start);
    vertex[tid].setValue(INT_MAX);
    for (int i = in_start; i < in_end; i++) {
        vertex[tid].setInEdge(i - in_start, incol[i], INT_MAX);
    }
}
//! Device-side wiring of out-edges from the CSR representation.
/*!
 * One thread per vertex; mirrors the host-side initOutEdge().
 */
__global__ void kern_initOutEdge(ChiVertex<int, int> **vertex,
                                 GraphChiContext *context, int *row, int *col) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= context->getNumVertices())
        return;
    const int first = row[tid];
    const int last = (tid + 1 < context->getNumVertices())
                         ? row[tid + 1]
                         : context->getNumEdges();
    for (int e = first; e < last; ++e) {
        vertex[tid]->setOutEdge(vertex, tid, e - first, col[e], 0);
    }
}
__managed__ obj_info_tuble *vfun_table;
__managed__ unsigned tree_size_g;
__managed__ void *temp_copyBack;
__managed__ void *temp_TP;
//! One superstep of label-propagation connected components (one thread/vertex).
/*!
 * Iteration 0 seeds each vertex's label with its own id and reads the
 * neighbor's vertex id instead of the (not yet initialized) edge value.
 * Later iterations take the minimum over the vertex label and all incident
 * edge values, then broadcast the new label onto edges holding a larger one.
 *
 * NOTE(review): the get_vfunc_type()/temp_TP stores appear to be a device
 * virtual-dispatch fixup -- temp_TP is a __managed__ global written before
 * each CLEANPTR(...)-> call. The exact store/call ordering is presumably
 * load-bearing; do not reorder. TODO confirm against parse_oo.h.
 */
__global__ void ConnectedComponent(ChiVertex<int, int> **vertex,
GraphChiContext *context, int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **vtable;
if (tid < context->getNumVertices()) {
int numEdges;
numEdges = vertex[tid]->numEdges();
if (iteration == 0) {
// first iteration: every vertex starts in its own component
int vid = vertex[tid]->getId();
vertex[tid]->setValue(vid);
}
int curMin;
curMin = vertex[tid]->getValue();
// gather: minimum label over all incident edges
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int nbLabel;
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[1];
nbLabel = CLEANPTR(edge, ChiEdge<int> *)->getValue();
if (iteration == 0) {
// edge values are uninitialized on iteration 0; use the neighbor id
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[0];
nbLabel =
CLEANPTR(edge, ChiEdge<int> *)->getVertexId(); // Note!
}
if (nbLabel < curMin) {
curMin = nbLabel;
}
}
/**
* Set my new label
*/
vertex[tid]->setValue(curMin);
int label = curMin;
/**
* Broadcast my value to neighbors by writing the value to my edges.
*/
if (iteration > 0) {
// scatter: only overwrite edges carrying a larger label
for (int i = 0; i < numEdges; i++) {
ChiEdge<int> *edge;
edge = vertex[tid]->edge(i);
int edgeValue;
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[1];
edgeValue = CLEANPTR(edge, ChiEdge<int> *)->getValue();
if (edgeValue > label) {
vtable = get_vfunc_type(edge, vfun_table);
temp_TP = vtable[2];
CLEANPTR(edge, ChiEdge<int> *)->setValue(label);
}
}
} else {
// Special case for first iteration to avoid overwriting
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int> *outEdge;
outEdge = vertex[tid]->getOutEdge(i);
vtable = get_vfunc_type(outEdge, vfun_table);
temp_TP = vtable[2];
CLEANPTR(outEdge, ChiEdge<int> *)->setValue(label);
}
}
}
}
//! Copy each vertex's final component label into the flat output array cc.
/*!
 * One thread per vertex; out-of-range threads do nothing.
 */
__global__ void copyBack(ChiVertex<int, int> **vertex, GraphChiContext *context,
int *cc) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
cc[tid] = vertex[tid]->getValue();
}
}
|
5337747547efb40b98fbabcfbc1826f6099a4b7d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;

//! Lazily-constructed singleton timer shared by this namespace.
PerformanceTimer& timer()
{
    static PerformanceTimer instance;
    return instance;
}

/**
 * Exclusive prefix-sum (scan) of idata into odata via Thrust's device scan.
 * Host<->device staging is excluded from the timed region; only the scan
 * itself is measured.
 */
void scan(int n, int *odata, const int *idata) {
    thrust::host_vector<int> staged(idata, idata + n);
    thrust::device_vector<int> dev_in = staged;
    thrust::device_vector<int> dev_out(n);

    timer().startGpuTimer();
    thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin());
    timer().endGpuTimer();

    thrust::copy_n(dev_out.begin(), n, odata);
}
}
}
| 5337747547efb40b98fbabcfbc1826f6099a4b7d.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
//! Returns the singleton GPU timer for this namespace (lazily constructed).
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
* The scan is exclusive; only the device-side scan is inside the timed
* region -- host<->device staging is deliberately excluded.
*/
void scan(int n, int *odata, const int *idata) {
// DONE-Part 4
// stage input on host, then mirror to the device
thrust::device_vector<int> dv_in = thrust::host_vector<int>(idata, idata + n);
thrust::device_vector<int> dv_out(n);
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
// copy the device result back into the caller's output buffer
thrust::copy_n(dv_out.begin(), n, odata);
}
}
}
|
1ee92c4520fc5368fec934b7253eb008e208fcd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
#include "NeighborListGPUTree_hip.cuh"
#include "hoomd/TextureTools.h"
#include "hoomd/extern/cub/hipcub/hipcub.hpp"
#define MORTON_CODE_BITS 30 //!< Length of the Morton code in bits (k = 10 bits per direction)
#define MORTON_CODE_N_BINS 1024 //!< Number of bins (2^10) per direction to generate 30 bit Morton codes
#define MORTON_TYPE_MASK_64 0x000000003fffffffu //!< 64 bit mask to turn morton code-type back to morton code
/*! \file NeighborListGPUTree.cu
\brief Defines GPU kernel code for neighbor list tree traversal on the GPU
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading leaf data
scalar4_tex_t leaf_xyzf_tex;
//! Texture for the diameter / body
scalar2_tex_t leaf_db_tex;
//! Texture for reading node upper and lower bounds
scalar4_tex_t aabb_node_bounds_tex;
//! Texture for the head list
texture<unsigned int, 1, hipReadModeElementType> head_list_tex;
//!< Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit.
/*!
* \param v unsigned integer with 10 bits set
* \returns The integer expanded with two zeros interleaved between bits
* http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/
*/
__device__ inline unsigned int expandBits(unsigned int v)
{
// classic Morton "bit spreading": each multiply duplicates the bit pattern
// at a new offset, each mask keeps only the bits destined for final slots
v = (v * 0x00010001u) & 0xFF0000FFu;
v = (v * 0x00000101u) & 0x0F00F00Fu;
v = (v * 0x00000011u) & 0xC30C30C3u;
v = (v * 0x00000005u) & 0x49249249u; // result: bits at positions 0,3,6,...,27
return v;
}
//! Assigns the Morton code-type key for each particle on this processor
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
*
* \b Implementation
* A sorting key is generated for each particle by determining the 30 bit Morton code for each particle, and then
* concatenating onto the type. Both the Morton code and the type are 32 bit integers, so the concatenation is stored
* compactly in a 64 bit integer morton_type = (type << 30) + morton code. In this way, a lexicographic sort will
* sort first by type, then by morton code. The corresponding particle id (thread index) is stashed into d_map_tree_pid
* to track particles after sorting.
*/
__global__ void gpu_nlist_morton_types_kernel(uint64_t *d_morton_types,
unsigned int *d_map_tree_pid,
int *d_morton_conditions,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int nghosts,
const BoxDim box,
const Scalar3 ghost_width)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N+nghosts)
return;
// acquire particle data
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
const unsigned int type = __scalar_as_int(postype.w);
// get position in simulation box
uchar3 periodic = box.getPeriodic();
Scalar3 f = box.makeFraction(pos,ghost_width);
/* check if the particle is inside the unit cell + ghost layer in all dimensions
* this tolerance is small enough that when we multiply by the morton code bin size, we are still in range
* we silently ignore ghosts outside of this width, and instead deal with that special case below
* where extra ghosts are communicated (e.g. for bonded interactions)
*/
if (((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
(f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
(f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001))) && idx < N)
{
// flag the (1-indexed) offending local particle; 0 means "no error"
atomicMax(d_morton_conditions,idx+1);
return;
}
// find the bin each particle belongs in
int ib = (int)(f.x * MORTON_CODE_N_BINS);
int jb = (int)(f.y * MORTON_CODE_N_BINS);
int kb = (int)(f.z * MORTON_CODE_N_BINS);
if (!periodic.x) // ghosts exist and may be past layer width
{
// handle special cases where random ghosts are beyond the expected layer
// by just rounding to the nearest edge
if (ib < 0)
{
ib = 0;
}
else if (ib >= MORTON_CODE_N_BINS)
{
ib = MORTON_CODE_N_BINS - 1;
}
}
else if (ib == MORTON_CODE_N_BINS) // some particles lie exactly on the edge, floor them to zero
{
ib = 0;
}
// do as for x in y
if (!periodic.y)
{
if (jb < 0)
{
jb = 0;
}
else if (jb >= MORTON_CODE_N_BINS)
{
jb = MORTON_CODE_N_BINS - 1;
}
}
else if (jb == MORTON_CODE_N_BINS)
{
jb = 0;
}
// do as for y in z
if (!periodic.z)
{
if (kb < 0)
{
kb = 0;
}
else if (kb >= MORTON_CODE_N_BINS)
{
kb = MORTON_CODE_N_BINS - 1;
}
}
else if (kb == MORTON_CODE_N_BINS)
{
kb = 0;
}
// inline call to some bit swizzling arithmetic
unsigned int ii = expandBits((unsigned int)ib);
unsigned int jj = expandBits((unsigned int)jb);
unsigned int kk = expandBits((unsigned int)kb);
// interleave the three 10-bit coordinates into a 30-bit Morton code
unsigned int morton_code = ii * 4 + jj * 2 + kk;
// save the morton code and corresponding particle index for sorting
// the morton codes hold both the type and the code to sort by both type and position simultaneously
d_morton_types[idx] = (((uint64_t)type) << MORTON_CODE_BITS) + (uint64_t)morton_code;
d_map_tree_pid[idx] = idx;
}
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
* \param block_size Requested thread block size of kernel launch
*
* \returns hipSuccess on completion
*/
/*!
 * Host launcher for gpu_nlist_morton_types_kernel: one thread per particle
 * (locals plus ghosts). The requested block size is clamped to the kernel's
 * maxThreadsPerBlock attribute, which is queried once and cached in a
 * function-local static.
 *
 * \returns hipSuccess on completion
 */
hipError_t gpu_nlist_morton_types(uint64_t *d_morton_types,
                                  unsigned int *d_map_tree_pid,
                                  int *d_morton_conditions,
                                  const Scalar4 *d_pos,
                                  const unsigned int N,
                                  const unsigned int nghosts,
                                  const BoxDim& box,
                                  const Scalar3 ghost_width,
                                  const unsigned int block_size)
{
    // query the kernel's thread limit only on the first call
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
    {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nlist_morton_types_kernel);
        max_block_size = attr.maxThreadsPerBlock;
    }

    const int run_block_size = min(block_size, max_block_size);
    const unsigned int nwork = N + nghosts;
    hipLaunchKernelGGL((gpu_nlist_morton_types_kernel),
                       dim3(nwork / run_block_size + 1), dim3(run_block_size), 0, 0,
                       d_morton_types,
                       d_map_tree_pid,
                       d_morton_conditions,
                       d_pos,
                       N,
                       nghosts,
                       box,
                       ghost_width);
    return hipSuccess;
}
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_morton_types_alt Auxiliary array of equal size to d_morton_types for double buffered sorting
* \param d_map_tree_pid List of particle ids
* \param d_map_tree_pid_alt Auxiliary array of equal size to d_map_tree_pid for double buffered sorting
* \param d_tmp_storage Temporary storage in device memory
* \param tmp_storage_bytes Number of bytes allocated for temporary storage
* \param swap_morton Flag to switch real data from auxiliary array to primary array after sorting
* \param swap_map Flag to switch real data from auxiliary array to primary array after sorting
* \param Ntot Total number of keys to sort
* \param n_type_bits Number of bits to check for particle types
*
* \returns hipSuccess on completion
*
* \b Implementation
* The CUB library is used for device-wide radix sorting. Radix sorting is O(kN) where k is the number of bits to check
* in an unsigned integer key, and N is the number of keys. We restrict the number of bits checked in the max 64 bit
* keys by only checking up to the MORTON_CODE_BITS + n_type_bits most significant bit. CUB DeviceRadixSort performs
* its own tuning at run time.
*
* Because CUB requires temporary storage, this function must be called twice. First, when \a d_tmp_storage is NULL,
* the number of bytes required for temporary storage is saved in \a tmp_storage_bytes. This memory must then be
* allocated in \a d_tmp_storage. On the second call, the radix sort is performed. Because the radix sort may put the
* active (sorted) buffer in either slot of the DoubleBuffer, a boolean flag is set in \a swap_morton and \a swap_map
* for whether these data arrays should be swapped.
*/
//! Two-phase CUB radix sort of (morton-type key, particle id) pairs.
/*!
 * Call once with d_tmp_storage == NULL to size the scratch buffer, then a
 * second time with the allocated buffer to perform the sort. Only the top
 * MORTON_CODE_BITS + n_type_bits bits of the 64-bit keys are compared.
 */
hipError_t gpu_nlist_morton_sort(uint64_t *d_morton_types,
uint64_t *d_morton_types_alt,
unsigned int *d_map_tree_pid,
unsigned int *d_map_tree_pid_alt,
void *d_tmp_storage,
size_t &tmp_storage_bytes,
bool &swap_morton,
bool &swap_map,
const unsigned int Ntot,
const unsigned int n_type_bits)
{
// initialize memory as "double buffered"
cub::DoubleBuffer<uint64_t> d_keys(d_morton_types, d_morton_types_alt);
cub::DoubleBuffer<unsigned int> d_vals(d_map_tree_pid, d_map_tree_pid_alt);
// on the first pass, this just sizes the temporary storage
// on the second pass, it actually does the radix sort
hipcub::DeviceRadixSort::SortPairs(d_tmp_storage,
tmp_storage_bytes,
d_keys,
d_vals,
Ntot,
0,
MORTON_CODE_BITS+n_type_bits);
// we've only done something to the buffers on the second time when temporary storage is allocated
if (d_tmp_storage != NULL)
{
// mark that the gpu arrays should be flipped if the final result is not in the right array
// (CUB may leave the sorted data in either slot of the DoubleBuffer)
swap_morton = (d_keys.selector == 1);
swap_map = (d_vals.selector == 1);
}
return hipSuccess;
}
//! Kernel to merge adjacent codes into leaf nodes
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type and leaf ordered particles by type
* \param Ntot Total number of keys to sort
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread per leaf is called, and is responsible for merging NLIST_GPU_PARTICLES_PER_LEAF into an AABB. Each thread
* first determines what type of leaf particle it is operating on by calculating and iterating on the number of leafs
* of each type. Then, the starting index is determined by subtracting d_leaf_offset[type] from the starting index that
* would be set in a nleaf x NLIST_GPU_PARTICLES_PER_LEAF array. The reason for this complexity is that the leaf particle
* array is not permitted to have any "holes" in it for faster traversal. The AABB is merged from the particle
* positions, and a Morton code is assigned to this AABB for determining tree hierarchy based on the Morton code of
* the first particle in the leaf. Although this does not necessarily generate the best ordering along the Z order curve
* for the newly merged leafs, it does guarantee that the leaf Morton codes are still in lexicographic ordering.
*
* AABBs are stored as two Scalar4s in a flat array. The first three coordinates of each Scalar4 correspond to the upper
* and lower bounds of the AABB. The last value of the upper AABB will hold a "rope" for traversing the tree (see
* gpu_nlist_bubble_aabbs_kernel), while the last value of the lower AABB holds the number of particles for a leaf node,
* or the left child for an internal node. This is determined by setting a bit to mark this value as a rope or as child.
*/
__global__ void gpu_nlist_merge_particles_kernel(Scalar4 *d_tree_aabbs,
uint32_t *d_morton_codes_red,
uint2 *d_tree_parent_sib,
const uint64_t *d_morton_types,
const Scalar4 *d_pos,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_type_head,
const unsigned int Ntot,
const unsigned int nleafs)
{
// leaf index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per leaf
if (idx >= nleafs)
return;
// get what type of leaf I am
unsigned int total_bins = 0;
int leaf_type = -1;
unsigned int max_idx = Ntot;
// walk the per-type leaf counts until this leaf index falls inside a type
for (unsigned int cur_type=0; leaf_type == -1 && cur_type < ntypes; ++cur_type)
{
total_bins += (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
if (idx < total_bins)
{
leaf_type = cur_type;
// the end of my type's particle range is one before the next
// nonzero type head (Ntot if no later type has particles)
for (unsigned int next_type=cur_type+1; next_type < ntypes; ++next_type)
{
if (d_type_head[next_type])
{
max_idx = d_type_head[next_type] - 1;
break; // quit out of this inner loop once a match is found
}
}
break; // quit the outer loop
}
}
// get the starting particle index assuming naive leaf structure, and then subtract offset to eliminate "holes"
unsigned int start_idx = idx*NLIST_GPU_PARTICLES_PER_LEAF - d_leaf_offset[leaf_type];
// NOTE(review): unsigned subtraction -- relies on start_idx <= max_idx,
// which presumably holds by construction of d_leaf_offset; verify upstream
unsigned int end_idx = (max_idx - start_idx > NLIST_GPU_PARTICLES_PER_LEAF) ? start_idx + NLIST_GPU_PARTICLES_PER_LEAF : max_idx;
// upper also holds the skip value, but we have no idea what this is right now
Scalar4 upper = d_pos[ d_map_tree_pid[start_idx] ];
upper.w = 0.0f;
// lower holds the particle number, we have one already
Scalar4 lower = upper;
unsigned int npart = 1;
for (unsigned int cur_p=start_idx+1; cur_p < end_idx; ++cur_p)
{
Scalar4 cur_pos = d_pos[ d_map_tree_pid[cur_p] ];
// merge the boxes together
if (cur_pos.x < lower.x) lower.x = cur_pos.x;
if (cur_pos.x > upper.x) upper.x = cur_pos.x;
if (cur_pos.y < lower.y) lower.y = cur_pos.y;
if (cur_pos.y > upper.y) upper.y = cur_pos.y;
if (cur_pos.z < lower.z) lower.z = cur_pos.z;
if (cur_pos.z > upper.z) upper.z = cur_pos.z;
++npart;
}
// AABB stored as two Scalar4s: [2*idx] = upper bound (w = future rope),
// [2*idx+1] = lower bound (w = particle count shifted left, leaf flag bit 0)
d_tree_aabbs[2*idx] = upper;
d_tree_aabbs[2*idx + 1] = make_scalar4(lower.x, lower.y, lower.z, __int_as_scalar(npart << 1));
// take logical AND with the 30 bit mask for the morton codes to extract just the morton code
// no sense swinging around 64 bit integers anymore
d_morton_codes_red[idx] = (unsigned int)(d_morton_types[start_idx] & MORTON_TYPE_MASK_64);
// fill the parent/sib relationships as if everything is a single leaf at first, to be overridden by hierarchy gen
// when this is not the case
d_tree_parent_sib[idx] = make_uint2(idx, idx << 1);
}
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type and leaf ordered particles by type
* \param Ntot Total number of keys to sort
* \param nleafs Number of leaf nodes
*
* \returns hipSuccess on completion
*/
//! Host launcher for gpu_nlist_merge_particles_kernel (one thread per leaf).
/*!
 * The requested block size is clamped to the kernel's maxThreadsPerBlock
 * attribute (queried once and cached).
 *
 * Fix: the launch previously sized the grid with the clamped run_block_size
 * but passed the unclamped block_size as the thread-block dimension; a
 * request above the kernel's limit would therefore fail to launch. The
 * clamped value is now used for both, matching gpu_nlist_morton_types.
 *
 * \returns hipSuccess on completion
 */
hipError_t gpu_nlist_merge_particles(Scalar4 *d_tree_aabbs,
uint32_t *d_morton_codes_red,
uint2 *d_tree_parent_sib,
const uint64_t *d_morton_types,
const Scalar4 *d_pos,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_type_head,
const unsigned int Ntot,
const unsigned int nleafs,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_nlist_merge_particles_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
hipLaunchKernelGGL(( gpu_nlist_merge_particles_kernel), dim3(nleafs/run_block_size + 1), dim3(run_block_size), 0, 0, d_tree_aabbs,
d_morton_codes_red,
d_tree_parent_sib,
d_morton_types,
d_pos,
d_num_per_type,
ntypes,
d_map_tree_pid,
d_leaf_offset,
d_type_head,
Ntot,
nleafs);
return hipSuccess;
}
//! Computes the longest common prefix between Morton codes
/*!
* \param d_morton_codes Array of Morton codes
* \param i First Morton code index
* \param j Second Morton code index
* \param min_idx The smallest index considered "in range" (inclusive)
* \param max_idx The last index considered "in range" (inclusive)
*
* \returns number of bits shared between the Morton codes of i and j
*
* delta(i,j) is defined as the largest number of bits shared between Morton codes i and j. When the Morton codes are
* sorted, this implies delta(i',j') >= delta(i,j) for any i',j' in [i,j]. If i and j lie outside
* of the range of Morton codes corresponding to this tree, then it always returns -1. If the Morton codes for i and j
* are identical, then the longest prefix of i and j is used as a tie breaker.
*/
__device__ inline int delta(const uint32_t *d_morton_codes,
unsigned int i,
unsigned int j,
int min_idx,
int max_idx)
{
// out-of-range check: j is unsigned, so j = i-1 at i == 0 wraps to
// UINT_MAX and is caught by the j > max_idx comparison as intended
if (j > max_idx || j < min_idx)
{
return -1;
}
uint32_t first_code = d_morton_codes[i];
uint32_t last_code = d_morton_codes[j];
// if codes match, then use index as tie breaker
// the number of shared bits is equal to the 32 bits in the integer, plus the number of bits shared between the
// indexes (offset from the start of the node range to make things simpler)
if (first_code == last_code)
{
return (32 + __clz((i-min_idx) ^ (j-min_idx)));
}
else
{
return __clz(first_code ^ last_code);
}
}
//! Determines the range of Morton codes that a node covers
/*!
* \param d_morton_codes Array of Morton codes
* \param min_idx The smallest Morton code index considered "in range" (inclusive)
* \param max_idx The last Morton code index considered "in range" (inclusive)
* \param idx Current node (Morton code) index
*
* \returns the minimum and maximum leafs covered by this node
* \note This is a literal implementation of the Karras pseudocode, with no optimizations or refinement.
* Tero Karras, "Maximizing parallelism in the construction of BVHs, octrees, and k-d trees",
* High Performance Graphics (2012).
*/
__device__ inline uint2 determineRange(const uint32_t *d_morton_codes,
const int min_idx,
const int max_idx,
const int idx)
{
int forward_prefix = delta(d_morton_codes, idx, idx+1, min_idx, max_idx);
int backward_prefix = delta(d_morton_codes, idx, idx-1, min_idx, max_idx);
// get direction of the range based on sign
// (the range extends toward the neighbor sharing the longer prefix)
int d = ((forward_prefix - backward_prefix) > 0) ? 1 : -1;
// get minimum prefix
// (any index inside the node's range must beat this prefix length)
int min_prefix = delta(d_morton_codes, idx, idx-d, min_idx, max_idx);
// get maximum prefix by binary search
// first grow an upper bound on the range length by powers of two
int lmax = 2;
while( delta(d_morton_codes, idx, idx + d*lmax, min_idx, max_idx) > min_prefix)
{
lmax = lmax << 1;
}
// then binary-search the exact length below that bound
unsigned int len = 0;
unsigned int step = lmax;
do
{
step = step >> 1;
unsigned int new_len = len + step;
if (delta(d_morton_codes, idx, idx + d*new_len, min_idx, max_idx) > min_prefix)
len = new_len;
}
while (step > 1);
// order range based on direction
uint2 range;
if (d > 0)
{
range.x = idx;
range.y = idx + len;
}
else
{
range.x = idx - len;
range.y = idx;
}
return range;
}
//! Finds the split position in Morton codes covered by a range
/*!
* \param d_morton_codes Array of Morton codes
* \param first First leaf node in the range
* \param last Last leaf node in the range
*
* \returns the leaf index corresponding to the split in Morton codes
* See determineRange for original source of algorithm.
*/
//! Finds the split position in Morton codes covered by a range
/*!
 * \param d_morton_codes Array of Morton codes
 * \param first First leaf node in the range
 * \param last Last leaf node in the range
 *
 * \returns the leaf index corresponding to the split in Morton codes: the
 * highest index whose code shares a strictly longer prefix with the first
 * code than the range as a whole does. Identical end codes split evenly.
 */
__device__ inline unsigned int findSplit(const uint32_t *d_morton_codes,
                                         const unsigned int first,
                                         const unsigned int last)
{
    const uint32_t lo_code = d_morton_codes[first];
    const uint32_t hi_code = d_morton_codes[last];

    // degenerate range with equal codes: bisect evenly
    if (lo_code == hi_code)
        return (first + last) >> 1;

    // number of high bits shared across the entire range
    const int shared_bits = __clz(lo_code ^ hi_code);

    // binary search with geometrically shrinking step for the split point
    unsigned int split = first;
    unsigned int span = last - first;
    do
        {
        span = (span + 1) >> 1;
        const unsigned int candidate = split + span;
        // accept the candidate if it stays in range and shares a longer
        // prefix with the first code than the whole range does
        if (candidate < last &&
            __clz(lo_code ^ d_morton_codes[candidate]) > shared_bits)
            {
            split = candidate;
            }
        }
    while (span > 1);

    return split;
}
//! Kernel to generate the parent-child-sibling relationships between nodes
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
*
* \b Implementation
* One thread is called per internal node in a single kernel launch. Each thread first determines its "local" index
* as an internal node within a tree based on the number of leafs per tree. The range of leafs covered by the internal
* node is determined, and then its split position is identified. The split identifies the children of the node as
* another internal node or as a leaf node.
*
* The parent and sibling of each child node is saved. The sibling id is bit shifted so as to use a single bit to encode
* the sibling as a right child or left child (after shifting, we set the bit to 1 if the sibling is a right child).
* If the child is a root node, it also saves information for itself (since no other node ever identifies a root as a
* child node).
*/
__global__ void gpu_nlist_gen_hierarchy_kernel(uint2 *d_tree_parent_sib,
const uint32_t *d_morton_codes,
const unsigned int *d_num_per_type,
const unsigned int ntypes,
const unsigned int nleafs,
const unsigned int ninternal)
{
// compute the internal node index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per internal node
if (idx >= ninternal)
return;
// get what type of leaf I am
unsigned int min_idx = 0; // the "0" of the leaf node array
unsigned int max_idx = 0; // the "N-1" of the leaf node array
unsigned int node_idx = idx;
unsigned int origin = 0;
unsigned int end = 0;
unsigned int cur_type=0;
unsigned int active_types=0;
// each type with particles contributes its own subtree; locate which
// type's internal-node range this thread's index falls into
for (cur_type=0; cur_type < ntypes; ++cur_type)
{
// current min index is the previous max index
min_idx = max_idx;
// max index adds the number of internal nodes in this type (nleaf - 1)
const unsigned int cur_nleaf = (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
if (cur_nleaf > 0)
{
max_idx += cur_nleaf-1;
++active_types;
}
// we break the loop if we are in range
if (idx < max_idx)
{
// decrement by 1 to get this back into the number we really need
--active_types;
// now, we repurpose the min and max index to now correspond to the *leaf* index.
// the min index is the minimum *leaf* index
origin = min_idx + active_types;
end = max_idx + active_types;
node_idx += active_types;
break;
}
}
// enact the magical split determining
uint2 range = determineRange(d_morton_codes, origin, end, node_idx);
unsigned int first = range.x;
unsigned int last = range.y;
unsigned int split = findSplit(d_morton_codes, first, last);
uint2 children;
// set the children, shifting ahead by nleafs - cur_type to account for leaf shifting
// this factor comes out from resetting 0 = N_leaf,i each time, and then remapping this to
// an internal node
// a child equal to the range endpoint is a leaf; otherwise it is an
// internal node and gets remapped past the leaf block
children.x = (split == first) ? split : (nleafs - active_types + split);
children.y = ((split + 1) == last) ? (split + 1) : nleafs - active_types + split + 1;
uint2 parent_sib;
parent_sib.x = nleafs + idx;
// encode the sibling as the right child
// (sibling index is shifted left; bit 0 set marks "sibling is right child")
parent_sib.y = children.y << 1;
parent_sib.y |= 1;
d_tree_parent_sib[children.x] = parent_sib;
// encode the sibling as the left child
parent_sib.y = children.x << 1;
d_tree_parent_sib[children.y] = parent_sib;
// root is always number "zero", but only it can set its parent / sibling
// we mark both of these as the root for traversing, since only the root node
// will be its own sibling
if (node_idx == origin)
{
parent_sib.x = nleafs + idx;
parent_sib.y = (nleafs + idx) << 1;
d_tree_parent_sib[nleafs + idx] = parent_sib;
}
}
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_gen_hierarchy(uint2 *d_tree_parent_sib,
                                   const uint32_t *d_morton_codes,
                                   const unsigned int *d_num_per_type,
                                   const unsigned int ntypes,
                                   const unsigned int nleafs,
                                   const unsigned int ninternal,
                                   const unsigned int block_size)
    {
    // query and cache the maximum block size supported by the kernel (first call only)
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nlist_gen_hierarchy_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    // clamp the requested block size to what the kernel supports
    const int threads = min(block_size,max_block_size);

    // launch with one thread per internal node
    const int blocks = ninternal/threads + 1;
    hipLaunchKernelGGL(( gpu_nlist_gen_hierarchy_kernel), dim3(blocks), dim3(threads), 0, 0,
                       d_tree_parent_sib,
                       d_morton_codes,
                       d_num_per_type,
                       ntypes,
                       nleafs,
                       ninternal);

    return hipSuccess;
    }
//! Kernel to bubble up enclosing AABBs to internal nodes from leaf nodes
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread is called per leaf node. The second thread to reach an internal node processes its two children,
* which guarantees that no node AABB is prematurely processed. The arrival order at a node is controlled by an atomic
* thread lock in global memory. This locking could be accelerated by using shared memory whenever a node is being
* processed by threads in the same block.
*
* When processing the node, the thread also walks up the tree to find the "rope" that tells a traverser
 * how to navigate the tree. If a query AABB intersects the current node, then the traverser always moves to the left
* child of the current node. If the AABB does not intersect, it moves along the "rope" to the next portion of the tree.
* The "rope" is calculated by walking back up the tree to find the earliest ancestor that is a left child of its
* parent. The rope then goes to that ancestor's sibling. If the root node is reached, then the rope is set to -1 to
* indicate traversal should be aborted.
*
* This kernel also encodes the left child of a node into the AABB for internal nodes. The thread processing the node
* checks if it arrived from a left child or right child of the node it is processing, and sets the left child of that
* parent accordingly. A child is indicated by bit shifting, and setting the first bit to 1.
*/
__global__ void gpu_nlist_bubble_aabbs_kernel(unsigned int *d_node_locks,
                                              Scalar4 *d_tree_aabbs,
                                              const uint2 *d_tree_parent_sib,
                                              const unsigned int ntypes,
                                              const unsigned int nleafs)
    {
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per leaf node
    if (idx >= nleafs)
        return;

    // okay, first we start from the leaf and set my bounding box
    Scalar4 cur_upper = d_tree_aabbs[2*idx];
    Scalar4 cur_lower = d_tree_aabbs[2*idx+1];

    // zero the counters for internal nodes
    // (the w components are repurposed below to hold the rope and the left-child index)
    cur_upper.w = 0.0f;
    cur_lower.w = 0.0f;

    unsigned int cur_node = idx;
    unsigned int lock_key = 0;
    do
        {
        uint2 cur_parent_sib = d_tree_parent_sib[cur_node];
        unsigned int cur_parent = cur_parent_sib.x;

        // if the current sibling is a right child, then the current node is a left child
        bool cur_is_left = (cur_parent_sib.y & 1);

        unsigned int cur_sibling = cur_parent_sib.y >> 1;

        // first we compute the skip for this node always
        // back track up the tree until you find a left child
        // we have a check in place so that we don't stall on the root node
        // (the root is its own sibling, so backtrack.x == backtrack.y >> 1 terminates the walk)
        uint2 backtrack = cur_parent_sib;
        while (!(backtrack.y & 1) && backtrack.x != (backtrack.y >> 1))
            {
            backtrack = d_tree_parent_sib[backtrack.x];
            }
        // then, the skip is to the sibling of that node, or else to quit
        if (backtrack.y & 1)
            {
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(backtrack.y >> 1);
            }
        else
            {
            // reached the root without finding a left child: rope = -1 aborts traversal
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(-1);
            }

        // then, we do an atomicAdd on the lock to see if we need to process the parent AABBs
        // check to make sure the parent is bigger than nleafs, or else the node lock always fails
        // so that we terminate the thread
        lock_key = (cur_parent >= nleafs) ? atomicAdd(d_node_locks + cur_parent - nleafs, 1) : 0;

        // process the node: only the *second* thread to arrive (lock_key == 1) proceeds,
        // which guarantees both children have already stored their AABBs
        // NOTE(review): visibility of the sibling's global AABB writes is assumed to be
        // established by the atomic ordering; an explicit __threadfence() before the
        // atomicAdd would make this guarantee explicit — confirm against the memory model
        if (lock_key == 1)
            {
            // compute the max upper bound
            Scalar4 sib_upper = d_tree_aabbs[2*cur_sibling];
            if (sib_upper.x > cur_upper.x) cur_upper.x = sib_upper.x;
            if (sib_upper.y > cur_upper.y) cur_upper.y = sib_upper.y;
            if (sib_upper.z > cur_upper.z) cur_upper.z = sib_upper.z;
            d_tree_aabbs[2*cur_parent] = cur_upper;

            // compute the min lower bound
            Scalar4 sib_lower = d_tree_aabbs[2*cur_sibling+1];
            if (sib_lower.x < cur_lower.x) cur_lower.x = sib_lower.x;
            if (sib_lower.y < cur_lower.y) cur_lower.y = sib_lower.y;
            if (sib_lower.z < cur_lower.z) cur_lower.z = sib_lower.z;

            // this must always be some internal node, so stash the left child of this node here
            // (shifted left with bit 0 set to mark "internal node" for the traversal kernel)
            unsigned int left_child_masked = ((cur_is_left ? cur_node : cur_sibling) << 1) | 1;
            cur_lower.w = __int_as_scalar( left_child_masked );

            d_tree_aabbs[2*cur_parent+1] = cur_lower;

            // bump the current node one level
            cur_node = cur_parent;
            }
        }
    while (lock_key == 1);
    }
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_bubble_aabbs(unsigned int *d_node_locks,
                                  Scalar4 *d_tree_aabbs,
                                  const uint2 *d_tree_parent_sib,
                                  const unsigned int ntypes,
                                  const unsigned int nleafs,
                                  const unsigned int ninternal,
                                  const unsigned int block_size)
    {
    // reset the per-internal-node visit counters so the first thread to arrive at
    // each node terminates and the second performs the merge
    hipMemset(d_node_locks, 0, sizeof(unsigned int)*ninternal);

    // clamp the requested block size to the kernel maximum, consistent with the other
    // launch wrappers (previously the raw block_size was used, so an oversized request
    // would produce an invalid launch configuration)
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nlist_bubble_aabbs_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    int run_block_size = min(block_size,max_block_size);

    // one thread per leaf node
    hipLaunchKernelGGL(( gpu_nlist_bubble_aabbs_kernel), dim3(nleafs/run_block_size + 1), dim3(run_block_size), 0, 0,
                       d_node_locks,
                       d_tree_aabbs,
                       d_tree_parent_sib,
                       ntypes,
                       nleafs);

    return hipSuccess;
    }
//! Kernel to rearrange particle data into leaf order for faster traversal
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
*
* \b Implementation
* One thread per particle is called. Writes are coalesced by writing in leaf order, and reading in a scattered way.
*/
__global__ void gpu_nlist_move_particles_kernel(Scalar4 *d_leaf_xyzf,
                                                Scalar2 *d_leaf_db,
                                                const Scalar4 *d_pos,
                                                const Scalar *d_diameter,
                                                const unsigned int *d_body,
                                                const unsigned int *d_map_tree_pid,
                                                const unsigned int Ntot)
    {
    // one thread per leaf-ordered particle slot
    const unsigned int leaf_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (leaf_idx >= Ntot)
        return;

    // gather: scattered read in ParticleData order, coalesced write in leaf order
    const unsigned int pidx = d_map_tree_pid[leaf_idx];

    // pack position + particle id into one Scalar4
    const Scalar4 postype = d_pos[pidx];
    d_leaf_xyzf[leaf_idx] = make_scalar4(postype.x, postype.y, postype.z, __int_as_scalar(pidx));

    // pack diameter + body id into one Scalar2
    d_leaf_db[leaf_idx] = make_scalar2(d_diameter[pidx], __int_as_scalar(d_body[pidx]));
    }
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_move_particles(Scalar4 *d_leaf_xyzf,
                                    Scalar2 *d_leaf_db,
                                    const Scalar4 *d_pos,
                                    const Scalar *d_diameter,
                                    const unsigned int *d_body,
                                    const unsigned int *d_map_tree_pid,
                                    const unsigned int Ntot,
                                    const unsigned int block_size)
    {
    // cache the kernel's maximum block size on the first invocation
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nlist_move_particles_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    // clamp the request and launch one thread per particle
    const int threads = min(block_size,max_block_size);
    const int blocks = Ntot/threads + 1;

    hipLaunchKernelGGL(( gpu_nlist_move_particles_kernel), dim3(blocks), dim3(threads), 0, 0,
                       d_leaf_xyzf,
                       d_leaf_db,
                       d_pos,
                       d_diameter,
                       d_body,
                       d_map_tree_pid,
                       Ntot);

    return hipSuccess;
    }
//! Kernel for traversing tree to generate neighbor list
/*!
* \param d_nlist Neighbor list for writing
* \param d_n_neigh Number of neighbors per particle
* \param d_last_updated_pos Records current particle positions
* \param d_conditions Store overflow condition by type
* \param d_Nmax Maximum number of neighbors allocated by type
* \param d_head_list Indexes for writing into neighbor list
* \param N Number of particles
* \param nghosts Number of ghost particles
* \param d_map_tree_pid Map leaf index to local particle index
* \param d_leaf_offset Offset for reading leaf particles by type
* \param d_tree_roots Index for tree root by type
* \param d_tree_aabbs Tree AABBs
* \param nleafs Total number of leafs
* \param d_leaf_xyzf Leaf position-id array
* \param d_leaf_db Leaf diameter-body array
* \param d_pos Particle positions
* \param d_image_list Translation vectors to check for traversal
* \param nimages Number of translation vectors to check
* \param d_r_cut Cutoff radius by type r_cut(i,j)
* \param r_buff Buffer around cutoff radius
* \param max_diam Maximum diameter attained by a particle for diameter shifting
* \param ntypes Number of particle types
*
* \b Implementation
* One thread is launched per particle, but the threads operate on particles in leaf order rather than ParticleData
* order in order to minimize divergence within a warp (particles in the same leaf should intersect similar parts of the
* tree). Each thread iterates on the particle types (trees) and queries on all translation vectors using a stackless
* search. When the query AABB intersects a node AABB, the node AABB is checked to be an internal node or a leaf node.
* If an internal node, then the traversal advances to that node's left child. If a leaf node, the leaf particles are
* tested directly to be included in the neighbor list. The node then advances along that leaf node's rope. If the AABB
* is not intersected, the traversal advances along the rope. This process proceeds until a rope signals that the
* traversal is complete.
*/
template<unsigned char flags>
__global__ void gpu_nlist_traverse_tree_kernel(unsigned int *d_nlist,
                                               unsigned int *d_n_neigh,
                                               Scalar4 *d_last_updated_pos,
                                               unsigned int *d_conditions,
                                               const unsigned int *d_Nmax,
                                               const unsigned int *d_head_list,
                                               const unsigned int N,
                                               const unsigned int nghosts,
                                               const unsigned int *d_map_tree_pid,
                                               const unsigned int *d_leaf_offset,
                                               const unsigned int *d_tree_roots,
                                               const Scalar4 *d_tree_aabbs,
                                               const unsigned int nleafs,
                                               const Scalar4 *d_leaf_xyzf,
                                               const Scalar2 *d_leaf_db,
                                               const Scalar4 *d_pos,
                                               const Scalar3 *d_image_list,
                                               const unsigned int nimages,
                                               const Scalar *d_r_cut,
                                               const Scalar r_buff,
                                               const Scalar max_diam,
                                               const unsigned int ntypes)
    {
    // decode the compile-time traversal options: bit 0 = body filtering, bit 1 = diameter shifting
    bool filter_body = flags & 1;
    bool diameter_shift = flags & 2;

    // cache the r_listsq parameters into shared memory
    const Index2D typpair_idx(ntypes);
    const unsigned int num_typ_parameters = typpair_idx.getNumElements();

    // shared data for per type pair parameters
    extern __shared__ unsigned char s_data[];

    // pointer for the r_listsq data
    Scalar *s_r_list = (Scalar *)(&s_data[0]);
    unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
    unsigned int *s_leaf_offset = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters + sizeof(unsigned int)*ntypes]);

    // load in the per type pair r_list (strided cooperative load over the block)
    for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
        {
        if (cur_offset + threadIdx.x < num_typ_parameters)
            {
            Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
            // force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
            s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
            }

        if (cur_offset + threadIdx.x < ntypes)
            {
            s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
            s_leaf_offset[cur_offset + threadIdx.x] = d_leaf_offset[cur_offset + threadIdx.x];
            }
        }
    __syncthreads();

    // compute the particle index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // quit now if this thread is processing past the end of the leaf list
    if (idx >= (N+nghosts))
        return;

    // read in the current position (threads walk in leaf order to reduce warp divergence)
    unsigned int my_pidx = d_map_tree_pid[idx];
    // we only process particles owned by this processor for neighbors
    if (my_pidx >= N)
        return;

    const Scalar4 postype_i = texFetchScalar4(d_pos, pdata_pos_tex, my_pidx);
    const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z);
    const unsigned int type_i = __scalar_as_int(postype_i.w);

    // fetch the diameter and body out of the leaf texture since it's bound anyway
    const Scalar2 db_i = texFetchScalar2(d_leaf_db, leaf_db_tex, idx);
    const Scalar diam_i = db_i.x;
    const unsigned int body_i = __scalar_as_int(db_i.y);

    const unsigned int nlist_head_i = texFetchUint(d_head_list, head_list_tex, my_pidx);

    unsigned int n_neigh_i = 0;
    for (unsigned int cur_pair_type=0; cur_pair_type < ntypes; ++cur_pair_type)
        {
        // Check primary box
        const Scalar r_cut_i = s_r_list[typpair_idx(type_i,cur_pair_type)];

        // Skip this tree type if it is not needed
        if (r_cut_i <= Scalar(0.0))
            continue;

        // stash the r_cutsq before any diameter shifting
        const Scalar r_cutsq_i = r_cut_i*r_cut_i;

        // the rlist to use for the AABB search has to be at least as big as the biggest diameter
        Scalar r_list_i = r_cut_i;
        if (diameter_shift)
            r_list_i += max_diam - Scalar(1.0);

        const unsigned int cur_tree_root = d_tree_roots[cur_pair_type];
        // skip this type if we don't have it
        if (cur_tree_root == NLIST_GPU_INVALID_NODE)
            continue;

        for (unsigned int cur_image = 0; cur_image < nimages; ++cur_image)
            {
            // query AABB centered on the translated image of particle i
            const Scalar3 pos_i_image = pos_i + d_image_list[cur_image];
            const Scalar3 aabb_upper = make_scalar3(pos_i_image.x + r_list_i,
                                                    pos_i_image.y + r_list_i,
                                                    pos_i_image.z + r_list_i);
            const Scalar3 aabb_lower = make_scalar3(pos_i_image.x - r_list_i,
                                                    pos_i_image.y - r_list_i,
                                                    pos_i_image.z - r_list_i);

            // stackless search: follow left children on hit, ropes on miss, until rope = -1
            int cur_node_idx = cur_tree_root;
            while (cur_node_idx > -1)
                {
                // upper bound + rope in one fetch, lower bound + (leaf count | left child) in the other
                const Scalar4 upper_rope = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx);
                const Scalar4 lower_np = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx+1);

                if (!(aabb_upper.x < lower_np.x
                      || aabb_lower.x > upper_rope.x
                      || aabb_upper.y < lower_np.y
                      || aabb_lower.y > upper_rope.y
                      || aabb_upper.z < lower_np.z
                      || aabb_lower.z > upper_rope.z))
                    {
                    // bit 0 of lower_np.w distinguishes internal nodes (1) from leafs (0)
                    const unsigned int np_child_masked = __scalar_as_int(lower_np.w);

                    if(!(np_child_masked & 1))
                        {
                        // leaf node
                        // all leaves must have at least 1 particle, so we can use this to decide
                        const unsigned int node_head = NLIST_GPU_PARTICLES_PER_LEAF*cur_node_idx - s_leaf_offset[cur_pair_type];
                        const unsigned int n_part = np_child_masked >> 1;
                        for (unsigned int cur_p = node_head; cur_p < node_head + n_part; ++cur_p)
                            {
                            // neighbor j
                            const Scalar4 cur_xyzf = texFetchScalar4(d_leaf_xyzf, leaf_xyzf_tex, cur_p);
                            const Scalar3 pos_j = make_scalar3(cur_xyzf.x, cur_xyzf.y, cur_xyzf.z);
                            const unsigned int j = __scalar_as_int(cur_xyzf.w);

                            const Scalar2 cur_db = texFetchScalar2(d_leaf_db, leaf_db_tex, cur_p);
                            const Scalar diam_j = cur_db.x;
                            const unsigned int body_j = __scalar_as_int(cur_db.y);

                            // exclude self, and (optionally) members of the same rigid body
                            bool excluded = (my_pidx == j);

                            if (filter_body && body_i != 0xffffffff)
                                excluded = excluded | (body_i == body_j);

                            if (!excluded)
                                {
                                // now we can trim down the actual particles based on diameter
                                // compute the shift for the cutoff if not excluded
                                Scalar sqshift = Scalar(0.0);
                                if (diameter_shift)
                                    {
                                    const Scalar delta = (diam_i + diam_j) * Scalar(0.5) - Scalar(1.0);
                                    // r^2 < (r_list + delta)^2
                                    // r^2 < r_listsq + delta^2 + 2*r_list*delta
                                    sqshift = (delta + Scalar(2.0) * r_cut_i) * delta;
                                    }

                                // compute distance and wrap back into box
                                Scalar3 drij = pos_j - pos_i_image;
                                Scalar dr2 = dot(drij,drij);

                                if (dr2 <= (r_cutsq_i + sqshift))
                                    {
                                    // only write while there is room; overflow is reported below
                                    if (n_neigh_i < s_Nmax[type_i])
                                        {
                                        d_nlist[nlist_head_i + n_neigh_i] = j;
                                        }
                                    ++n_neigh_i;
                                    }
                                }
                            }

                        // leaf nodes always move to their rope
                        cur_node_idx = __scalar_as_int(upper_rope.w);
                        }
                    else
                        {
                        // internal node, take left child
                        cur_node_idx = (np_child_masked >> 1);
                        }
                    }
                else
                    {
                    cur_node_idx = __scalar_as_int(upper_rope.w); // no overlap, rope ahead
                    }
                } // end stackless search
            } // end loop over images
        } // end loop over pair types

    // could try reordering by idx instead of pidx, but that seems to not make much difference in microbenchmarking.
    d_n_neigh[my_pidx] = n_neigh_i;
    // store the position in pdata layout with the type bit-cast into w
    // (bugfix: was __scalar_as_int(type_i), which float-converts the integer type and then
    // reinterprets its bits, corrupting the stored value; __int_as_scalar matches the
    // convention used everywhere else for packing ints into a Scalar4 w component)
    d_last_updated_pos[my_pidx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __int_as_scalar(type_i));

    // update the number of neighbors for this type if allocated memory is exceeded
    if (n_neigh_i >= s_Nmax[type_i])
        atomicMax(&d_conditions[type_i], n_neigh_i);
    }
//! Templated launcher for gpu_nlist_traverse_tree_kernel
/*!
 * \tparam flags Traversal options bitmask: bit 0 enables body filtering, bit 1 enables diameter shifting
 *
 * Caches the maximum block size for this kernel instantiation on first call (one static per
 * instantiation, matching the previous four duplicated dispatch branches), clamps the requested
 * block size, and launches one thread per leaf particle (N + nghosts total).
 */
template<unsigned char flags>
static void gpu_nlist_traverse_tree_launch(unsigned int *d_nlist,
                                           unsigned int *d_n_neigh,
                                           Scalar4 *d_last_updated_pos,
                                           unsigned int *d_conditions,
                                           const unsigned int *d_Nmax,
                                           const unsigned int *d_head_list,
                                           const unsigned int N,
                                           const unsigned int nghosts,
                                           const unsigned int *d_map_tree_pid,
                                           const unsigned int *d_leaf_offset,
                                           const unsigned int *d_tree_roots,
                                           const Scalar4 *d_tree_aabbs,
                                           const unsigned int nleafs,
                                           const Scalar4 *d_leaf_xyzf,
                                           const Scalar2 *d_leaf_db,
                                           const Scalar4 *d_pos,
                                           const Scalar3 *d_image_list,
                                           const unsigned int nimages,
                                           const Scalar *d_r_cut,
                                           const Scalar r_buff,
                                           const Scalar max_diam,
                                           const unsigned int ntypes,
                                           const unsigned int shared_size,
                                           const unsigned int block_size)
    {
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, reinterpret_cast<const void *>(gpu_nlist_traverse_tree_kernel<flags>));
        max_block_size = attr.maxThreadsPerBlock;
        }
    int run_block_size = min(block_size,max_block_size);

    // one thread per leaf particle, including ghosts
    int nblocks = (N+nghosts)/run_block_size + 1;
    hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<flags>), dim3(nblocks), dim3(run_block_size), shared_size, 0,
                       d_nlist,
                       d_n_neigh,
                       d_last_updated_pos,
                       d_conditions,
                       d_Nmax,
                       d_head_list,
                       N,
                       nghosts,
                       d_map_tree_pid,
                       d_leaf_offset,
                       d_tree_roots,
                       d_tree_aabbs,
                       nleafs,
                       d_leaf_xyzf,
                       d_leaf_db,
                       d_pos,
                       d_image_list,
                       nimages,
                       d_r_cut,
                       r_buff,
                       max_diam,
                       ntypes);
    }

/*!
 * \param d_nlist Neighbor list for writing
 * \param d_n_neigh Number of neighbors per particle
 * \param d_last_updated_pos Records current particle positions
 * \param d_conditions Store overflow condition by type
 * \param d_Nmax Maximum number of neighbors allocated by type
 * \param d_head_list Indexes for writing into neighbor list
 * \param N Number of particles
 * \param nghosts Number of ghost particles
 * \param d_map_tree_pid Map leaf index to local particle index
 * \param d_leaf_offset Offset for reading leaf particles by type
 * \param d_tree_roots Index for tree root by type
 * \param d_tree_aabbs Tree AABBs
 * \param nleafs Total number of leafs
 * \param ninternal Number of internal nodes (unused here, kept for interface stability)
 * \param nnodes Total number of tree nodes (for texture binding)
 * \param d_leaf_xyzf Leaf position-id array
 * \param d_leaf_db Leaf diameter-body array
 * \param d_pos Particle positions
 * \param d_image_list Translation vectors to check for traversal
 * \param nimages Number of translation vectors to check
 * \param d_r_cut Cutoff radius by type r_cut(i,j)
 * \param r_buff Buffer around cutoff radius
 * \param max_diam Maximum diameter attained by a particle for diameter shifting
 * \param ntypes Number of particle types
 * \param filter_body True if body filtering is enabled
 * \param diameter_shift True if rcut(i,j) should be shifted by the particle diameters
 * \param compute_capability Compute capability of the GPU (in 20, 30, 35 format)
 * \param block_size Requested thread block size
 *
 * \returns hipSuccess on completion
 * \returns hipError_t on failure to texture bind
 *
 * \note Kernel calls are templated on body filtering and diameter shifting for optimization.
 * \note One thread is called for all leaf particles. Some of these threads will die because they correspond to ghost
 * particles not owned by the rank. Because the leaf particles are sorted, there is no easy way to skip these
 * particles, and this inefficiency is assumed to be relatively small.
 */
hipError_t gpu_nlist_traverse_tree(unsigned int *d_nlist,
                                   unsigned int *d_n_neigh,
                                   Scalar4 *d_last_updated_pos,
                                   unsigned int *d_conditions,
                                   const unsigned int *d_Nmax,
                                   const unsigned int *d_head_list,
                                   const unsigned int N,
                                   const unsigned int nghosts,
                                   const unsigned int *d_map_tree_pid,
                                   const unsigned int *d_leaf_offset,
                                   const unsigned int *d_tree_roots,
                                   const Scalar4 *d_tree_aabbs,
                                   const unsigned int nleafs,
                                   const unsigned int ninternal,
                                   const unsigned int nnodes,
                                   const Scalar4 *d_leaf_xyzf,
                                   const Scalar2 *d_leaf_db,
                                   const Scalar4 *d_pos,
                                   const Scalar3 *d_image_list,
                                   const unsigned int nimages,
                                   const Scalar *d_r_cut,
                                   const Scalar r_buff,
                                   const Scalar max_diam,
                                   const unsigned int ntypes,
                                   bool filter_body,
                                   bool diameter_shift,
                                   const unsigned int compute_capability,
                                   const unsigned int block_size)
    {
    // shared memory = r_list (ntypes^2) + Nmax (ntypes) + leaf_offset (ntypes)
    Index2D typpair_idx(ntypes);
    unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + 2*sizeof(unsigned int)*ntypes;

    // bind the read-only data to textures on devices without a read-only data cache path
    if (compute_capability < 35)
        {
        pdata_pos_tex.normalized = false;
        pdata_pos_tex.filterMode = hipFilterModePoint;
        hipError_t error = hipBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*(N+nghosts));
        if (error != hipSuccess)
            return error;

        leaf_xyzf_tex.normalized = false;
        leaf_xyzf_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, leaf_xyzf_tex, d_leaf_xyzf, sizeof(Scalar4)*(N+nghosts));
        if (error != hipSuccess)
            return error;

        leaf_db_tex.normalized = false;
        leaf_db_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, leaf_db_tex, d_leaf_db, sizeof(Scalar2)*(N+nghosts));
        if (error != hipSuccess)
            return error;

        aabb_node_bounds_tex.normalized = false;
        aabb_node_bounds_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, aabb_node_bounds_tex, d_tree_aabbs, sizeof(Scalar4)*2*nnodes);
        if (error != hipSuccess)
            return error;

        head_list_tex.normalized = false;
        head_list_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, head_list_tex, d_head_list, sizeof(unsigned int)*N);
        if (error != hipSuccess)
            return error;
        }

    // dispatch to the kernel instantiation matching the requested runtime options
    const unsigned char flags = (filter_body ? 1 : 0) | (diameter_shift ? 2 : 0);
    switch (flags)
        {
        case 0:
            gpu_nlist_traverse_tree_launch<0>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions,
                                              d_Nmax, d_head_list, N, nghosts, d_map_tree_pid,
                                              d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
                                              d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages,
                                              d_r_cut, r_buff, max_diam, ntypes, shared_size, block_size);
            break;
        case 1:
            gpu_nlist_traverse_tree_launch<1>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions,
                                              d_Nmax, d_head_list, N, nghosts, d_map_tree_pid,
                                              d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
                                              d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages,
                                              d_r_cut, r_buff, max_diam, ntypes, shared_size, block_size);
            break;
        case 2:
            gpu_nlist_traverse_tree_launch<2>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions,
                                              d_Nmax, d_head_list, N, nghosts, d_map_tree_pid,
                                              d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
                                              d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages,
                                              d_r_cut, r_buff, max_diam, ntypes, shared_size, block_size);
            break;
        default:
            gpu_nlist_traverse_tree_launch<3>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions,
                                              d_Nmax, d_head_list, N, nghosts, d_map_tree_pid,
                                              d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
                                              d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages,
                                              d_r_cut, r_buff, max_diam, ntypes, shared_size, block_size);
            break;
        }

    // unbind the textures
    if (compute_capability < 35)
        {
        hipError_t error = hipUnbindTexture(pdata_pos_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(leaf_xyzf_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(leaf_db_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(aabb_node_bounds_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(head_list_tex);
        if (error != hipSuccess)
            return error;
        }

    return hipSuccess;
    }
//! Kernel to find divisons between particle types in sorted order
/*!
* \param d_type_head Index to first type in leaf ordered particles by type
* \param d_pos Particle positions
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param N Total number of particles on rank (including ghosts)
*
* The starting index for each type of particles is the first particle where the left neighbor is not of the same type.
*/
__global__ void gpu_nlist_get_divisions_kernel(unsigned int *d_type_head,
                                               const Scalar4 *d_pos,
                                               const unsigned int *d_map_tree_pid,
                                               const unsigned int N)
    {
    // one thread per leaf-ordered particle slot
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;

    // type of the particle occupying this slot
    const unsigned int my_type = __scalar_as_int(d_pos[d_map_tree_pid[idx]].w);

    if (idx == 0)
        {
        // the first particle always begins its type's range (stored as 1 = index 0 + 1)
        d_type_head[my_type] = 1;
        return;
        }

    // every other particle marks a type boundary when its left neighbor differs in type
    const unsigned int left_type = __scalar_as_int(d_pos[d_map_tree_pid[idx - 1]].w);
    if (left_type != my_type)
        {
        // store idx+1 so that 0 can signal "none of this type found"
        d_type_head[my_type] = idx + 1;
        }
    }
/*!
* \param d_type_head Index to first type in leaf ordered particles by type
* \param d_num_per_type Number of particles per type
* \param d_leaf_offset Offset for reading particles out of leaf order
* \param d_tree_roots Root node of each tree
* \param d_pos Particles positions
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param N Total number of particles on rank (including ghosts)
* \param ntypes Number of types
* \param block_size Requested thread block size
*
* \returns hipSuccess on completion
*/
hipError_t gpu_nlist_init_count(unsigned int *d_type_head,
                                const Scalar4 *d_pos,
                                const unsigned int *d_map_tree_pid,
                                const unsigned int N,
                                const unsigned int ntypes,
                                const unsigned int block_size)
    {
    // cache the kernel's maximum block size on the first invocation
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nlist_get_divisions_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    const int threads = min(block_size,max_block_size);

    // clear the head list; zero encodes "no particle of this type present"
    hipMemset(d_type_head, 0, sizeof(unsigned int)*ntypes);

    // find the head list divisions with one thread per particle
    hipLaunchKernelGGL(( gpu_nlist_get_divisions_kernel), dim3(N/threads + 1), dim3(threads), 0, 0,
                       d_type_head, d_pos, d_map_tree_pid, N);

    return hipSuccess;
    }
#undef MORTON_CODE_BITS
#undef MORTON_TYPE_MASK_64
#undef MORTON_CODE_N_BINS
| 1ee92c4520fc5368fec934b7253eb008e208fcd5.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
#include "NeighborListGPUTree.cuh"
#include "hoomd/TextureTools.h"
#include "hoomd/extern/cub/cub/cub.cuh"
#define MORTON_CODE_BITS 30 //!< Length of the Morton code in bits (k = 10 bits per direction)
#define MORTON_CODE_N_BINS 1024 //!< Number of bins (2^10) per direction to generate 30 bit Morton codes
#define MORTON_TYPE_MASK_64 0x000000003fffffffu //!< 64 bit mask to turn morton code-type back to morton code
/*! \file NeighborListGPUTree.cu
\brief Defines GPU kernel code for neighbor list tree traversal on the GPU
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading leaf data
scalar4_tex_t leaf_xyzf_tex;
//! Texture for the diameter / body
scalar2_tex_t leaf_db_tex;
//! Texture for reading node upper and lower bounds
scalar4_tex_t aabb_node_bounds_tex;
//! Texture for the head list
texture<unsigned int, 1, cudaReadModeElementType> head_list_tex;
//!< Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit.
/*!
* \param v unsigned integer with 10 bits set
* \returns The integer expanded with two zeros interleaved between bits
* http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/
*/
__device__ inline unsigned int expandBits(unsigned int v)
{
v = (v * 0x00010001u) & 0xFF0000FFu;
v = (v * 0x00000101u) & 0x0F00F00Fu;
v = (v * 0x00000011u) & 0xC30C30C3u;
v = (v * 0x00000005u) & 0x49249249u;
return v;
}
//! Assigns the Morton code-type key for each particle on this processor
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
*
* \b Implementation
* A sorting key is generated for each particle by determining the 30 bit Morton code for each particle, and then
* concatenating onto the type. Both the Morton code and the type are 32 bit integers, so the concatenation is stored
* compactly in a 64 bit integer morton_type = (type << 30) + morton code. In this way, a lexicographic sort will
* sort first by type, then by morton code. The corresponding particle id (thread index) is stashed into d_map_tree_pid
* to track particles after sorting.
*/
__global__ void gpu_nlist_morton_types_kernel(uint64_t *d_morton_types,
                                              unsigned int *d_map_tree_pid,
                                              int *d_morton_conditions,
                                              const Scalar4 *d_pos,
                                              const unsigned int N,
                                              const unsigned int nghosts,
                                              const BoxDim box,
                                              const Scalar3 ghost_width)
    {
    // compute the particle index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per particle (local particles first, then ghosts)
    if (idx >= N+nghosts)
        return;

    // acquire particle data; w packs the particle type as an int
    Scalar4 postype = d_pos[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
    const unsigned int type = __scalar_as_int(postype.w);

    // get fractional position within the box expanded by the ghost layer
    uchar3 periodic = box.getPeriodic();
    Scalar3 f = box.makeFraction(pos,ghost_width);

    /* check if the particle is inside the unit cell + ghost layer in all dimensions
     * this tolerance is small enough that when we multiply by the morton code bin size, we are still in range
     * we silently ignore ghosts outside of this width, and instead deal with that special case below
     * where extra ghosts are communicated (e.g. for bonded interactions)
     */
    if (((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
         (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
         (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001))) && idx < N)
        {
        // store idx+1 so zero can signal "no out-of-bounds local particle";
        // the unsigned idx+1 is implicitly converted to int for the atomicMax
        atomicMax(d_morton_conditions,idx+1);
        return;
        }

    // find the bin each particle belongs in along each axis
    int ib = (int)(f.x * MORTON_CODE_N_BINS);
    int jb = (int)(f.y * MORTON_CODE_N_BINS);
    int kb = (int)(f.z * MORTON_CODE_N_BINS);

    if (!periodic.x) // ghosts exist and may be past layer width
        {
        // handle special cases where random ghosts are beyond the expected layer
        // by just rounding to the nearest edge
        if (ib < 0)
            {
            ib = 0;
            }
        else if (ib >= MORTON_CODE_N_BINS)
            {
            ib = MORTON_CODE_N_BINS - 1;
            }
        }
    else if (ib == MORTON_CODE_N_BINS) // some particles lie exactly on the edge, floor them to zero
        {
        ib = 0;
        }

    // do as for x in y
    if (!periodic.y)
        {
        if (jb < 0)
            {
            jb = 0;
            }
        else if (jb >= MORTON_CODE_N_BINS)
            {
            jb = MORTON_CODE_N_BINS - 1;
            }
        }
    else if (jb == MORTON_CODE_N_BINS)
        {
        jb = 0;
        }

    // do as for y in z
    if (!periodic.z)
        {
        if (kb < 0)
            {
            kb = 0;
            }
        else if (kb >= MORTON_CODE_N_BINS)
            {
            kb = MORTON_CODE_N_BINS - 1;
            }
        }
    else if (kb == MORTON_CODE_N_BINS)
        {
        kb = 0;
        }

    // inline call to some bit swizzling arithmetic: interleave the three bin
    // indices into a 30-bit Morton code (x is the most significant interleaved bit)
    unsigned int ii = expandBits((unsigned int)ib);
    unsigned int jj = expandBits((unsigned int)jb);
    unsigned int kk = expandBits((unsigned int)kb);
    unsigned int morton_code = ii * 4 + jj * 2 + kk;

    // save the morton code and corresponding particle index for sorting
    // the morton codes hold both the type and the code to sort by both type and position simultaneously
    d_morton_types[idx] = (((uint64_t)type) << MORTON_CODE_BITS) + (uint64_t)morton_code;
    d_map_tree_pid[idx] = idx;
    }
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_map_tree_pid List to be overwritten with particle ids in ascending order
* \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
* \param d_pos Particle positions
* \param N Number of local particles
* \param nghosts Number of ghost particles
* \param box Local simulation box
* \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
* \param block_size Requested thread block size of kernel launch
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_morton_types(uint64_t *d_morton_types,
                                   unsigned int *d_map_tree_pid,
                                   int *d_morton_conditions,
                                   const Scalar4 *d_pos,
                                   const unsigned int N,
                                   const unsigned int nghosts,
                                   const BoxDim& box,
                                   const Scalar3 ghost_width,
                                   const unsigned int block_size)
    {
    // query the kernel's hardware block-size limit once and cache it for later calls
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_morton_types_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    // clamp the requested block size, then launch one thread per local + ghost particle
    const int run_block_size = min(block_size, max_block_size);
    const unsigned int nblocks = (N + nghosts) / run_block_size + 1;
    gpu_nlist_morton_types_kernel<<<nblocks, run_block_size>>>(d_morton_types,
                                                               d_map_tree_pid,
                                                               d_morton_conditions,
                                                               d_pos,
                                                               N,
                                                               nghosts,
                                                               box,
                                                               ghost_width);
    return cudaSuccess;
    }
/*!
* \param d_morton_types Morton code-type keys per particle
* \param d_morton_types_alt Auxiliary array of equal size to d_morton_types for double buffered sorting
* \param d_map_tree_pid List of particle ids
* \param d_map_tree_pid_alt Auxiliary array of equal size to d_map_tree_pid for double buffered sorting
* \param d_tmp_storage Temporary storage in device memory
* \param tmp_storage_bytes Number of bytes allocated for temporary storage
* \param swap_morton Flag to switch real data from auxiliary array to primary array after sorting
* \param swap_map Flag to switch real data from auxiliary array to primary array after sorting
* \param Ntot Total number of keys to sort
* \param n_type_bits Number of bits to check for particle types
*
* \returns cudaSuccess on completion
*
* \b Implementation
* The CUB library is used for device-wide radix sorting. Radix sorting is O(kN) where k is the number of bits to check
* in an unsigned integer key, and N is the number of keys. We restrict the number of bits checked in the max 64 bit
* keys by only checking up to the MORTON_CODE_BITS + n_type_bits most significant bit. CUB DeviceRadixSort performs
* its own tuning at run time.
*
* Because CUB requires temporary storage, this function must be called twice. First, when \a d_tmp_storage is NULL,
* the number of bytes required for temporary storage is saved in \a tmp_storage_bytes. This memory must then be
* allocated in \a d_tmp_storage. On the second call, the radix sort is performed. Because the radix sort may put the
* active (sorted) buffer in either slot of the DoubleBuffer, a boolean flag is set in \a swap_morton and \a swap_map
* for whether these data arrays should be swapped.
*/
cudaError_t gpu_nlist_morton_sort(uint64_t *d_morton_types,
                                  uint64_t *d_morton_types_alt,
                                  unsigned int *d_map_tree_pid,
                                  unsigned int *d_map_tree_pid_alt,
                                  void *d_tmp_storage,
                                  size_t &tmp_storage_bytes,
                                  bool &swap_morton,
                                  bool &swap_map,
                                  const unsigned int Ntot,
                                  const unsigned int n_type_bits)
    {
    // wrap the primary and auxiliary arrays as CUB double buffers
    cub::DoubleBuffer<uint64_t> keys(d_morton_types, d_morton_types_alt);
    cub::DoubleBuffer<unsigned int> vals(d_map_tree_pid, d_map_tree_pid_alt);

    // first call (d_tmp_storage == NULL) only sizes the temporary storage;
    // second call performs the radix sort over the lowest MORTON_CODE_BITS + n_type_bits bits
    cub::DeviceRadixSort::SortPairs(d_tmp_storage,
                                    tmp_storage_bytes,
                                    keys,
                                    vals,
                                    Ntot,
                                    0,
                                    MORTON_CODE_BITS+n_type_bits);

    // after a real sort, report whether the sorted result landed in the alternate buffers
    // so the caller knows to swap its arrays
    if (d_tmp_storage != NULL)
        {
        swap_morton = (keys.selector == 1);
        swap_map = (vals.selector == 1);
        }

    return cudaSuccess;
    }
//! Kernel to merge adjacent codes into leaf nodes
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type and leaf ordered particles by type
* \param Ntot Total number of keys to sort
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread per leaf is called, and is responsible for merging NLIST_GPU_PARTICLES_PER_LEAF into an AABB. Each thread
* first determines what type of leaf particle it is operating on by calculating and iterating on the number of leafs
* of each type. Then, the starting index is determined by subtracting d_leaf_offset[type] from the starting index that
* would be set in a nleaf x NLIST_GPU_PARTICLES_PER_LEAF array. The reason for this complexity is that the leaf particle
* array is not permitted to have any "holes" in it for faster traversal. The AABB is merged from the particle
* positions, and a Morton code is assigned to this AABB for determining tree hierarchy based on the Morton code of
* the first particle in the leaf. Although this does not necessarily generate the best ordering along the Z order curve
* for the newly merged leafs, it does guarantee that the leaf Morton codes are still in lexicographic ordering.
*
* AABBs are stored as two Scalar4s in a flat array. The first three coordinates of each Scalar4 correspond to the upper
* and lower bounds of the AABB. The last value of the upper AABB will hold a "rope" for traversing the tree (see
* gpu_nlist_bubble_aabbs_kernel), while the last value of the lower AABB holds the number of particles for a leaf node,
* or the left child for an internal node. This is determined by setting a bit to mark this value as a rope or as child.
*/
__global__ void gpu_nlist_merge_particles_kernel(Scalar4 *d_tree_aabbs,
                                                 uint32_t *d_morton_codes_red,
                                                 uint2 *d_tree_parent_sib,
                                                 const uint64_t *d_morton_types,
                                                 const Scalar4 *d_pos,
                                                 const unsigned int *d_num_per_type,
                                                 const unsigned int ntypes,
                                                 const unsigned int *d_map_tree_pid,
                                                 const unsigned int *d_leaf_offset,
                                                 const unsigned int *d_type_head,
                                                 const unsigned int Ntot,
                                                 const unsigned int nleafs)
    {
    // leaf index
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per leaf
    if (idx >= nleafs)
        return;

    // get what type of leaf I am by accumulating the leaf count per type until idx falls in range
    unsigned int total_bins = 0;
    int leaf_type = -1;
    unsigned int max_idx = Ntot;
    for (unsigned int cur_type=0; leaf_type == -1 && cur_type < ntypes; ++cur_type)
        {
        total_bins += (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;

        if (idx < total_bins)
            {
            leaf_type = cur_type;
            // the last particle of this type is one before the head of the next occupied type
            // (a zero entry in d_type_head appears to mean "no particles of that type" -- see guard below)
            for (unsigned int next_type=cur_type+1; next_type < ntypes; ++next_type)
                {
                if (d_type_head[next_type])
                    {
                    max_idx = d_type_head[next_type] - 1;
                    break; // quit out of this inner loop once a match is found
                    }
                }
            break; // quit the outer loop
            }
        }

    // get the starting particle index assuming naive leaf structure, and then subtract offset to eliminate "holes"
    unsigned int start_idx = idx*NLIST_GPU_PARTICLES_PER_LEAF - d_leaf_offset[leaf_type];
    unsigned int end_idx = (max_idx - start_idx > NLIST_GPU_PARTICLES_PER_LEAF) ? start_idx + NLIST_GPU_PARTICLES_PER_LEAF : max_idx;

    // upper also holds the skip value, but we have no idea what this is right now
    Scalar4 upper = d_pos[ d_map_tree_pid[start_idx] ];
    upper.w = 0.0f;

    // lower holds the particle number, we have one already
    Scalar4 lower = upper;
    unsigned int npart = 1;

    // merge the remaining particles of the leaf into a single enclosing AABB
    for (unsigned int cur_p=start_idx+1; cur_p < end_idx; ++cur_p)
        {
        Scalar4 cur_pos = d_pos[ d_map_tree_pid[cur_p] ];

        // merge the boxes together
        if (cur_pos.x < lower.x) lower.x = cur_pos.x;
        if (cur_pos.x > upper.x) upper.x = cur_pos.x;

        if (cur_pos.y < lower.y) lower.y = cur_pos.y;
        if (cur_pos.y > upper.y) upper.y = cur_pos.y;

        if (cur_pos.z < lower.z) lower.z = cur_pos.z;
        if (cur_pos.z > upper.z) upper.z = cur_pos.z;

        ++npart;
        }

    // store the AABB as (upper, lower); the particle count is bit-shifted left so the
    // low bit can later distinguish a leaf count from an internal node's left child
    d_tree_aabbs[2*idx] = upper;
    d_tree_aabbs[2*idx + 1] = make_scalar4(lower.x, lower.y, lower.z, __int_as_scalar(npart << 1));

    // take logical AND with the 30 bit mask for the morton codes to extract just the morton code
    // no sense swinging around 64 bit integers anymore
    d_morton_codes_red[idx] = (unsigned int)(d_morton_types[start_idx] & MORTON_TYPE_MASK_64);

    // fill the parent/sib relationships as if everything is a single leaf at first, to be overridden by hierarchy gen
    // when this is not the case
    d_tree_parent_sib[idx] = make_uint2(idx, idx << 1);
    }
/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree
* \param d_morton_codes_red The Morton codes corresponding to the merged leafs
* \param d_tree_parent_sib Parent and sibling indexes for all nodes
* \param d_morton_types Morton-code type keys for all particles
* \param d_pos Particle positions
* \param d_num_per_type Number of particles per type
* \param ntypes Number of particle types
* \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
* \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
* \param d_type_head Index to first type and leaf ordered particles by type
* \param Ntot Total number of keys to sort
* \param nleafs Number of leaf nodes
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_merge_particles(Scalar4 *d_tree_aabbs,
                                      uint32_t *d_morton_codes_red,
                                      uint2 *d_tree_parent_sib,
                                      const uint64_t *d_morton_types,
                                      const Scalar4 *d_pos,
                                      const unsigned int *d_num_per_type,
                                      const unsigned int ntypes,
                                      const unsigned int *d_map_tree_pid,
                                      const unsigned int *d_leaf_offset,
                                      const unsigned int *d_type_head,
                                      const unsigned int Ntot,
                                      const unsigned int nleafs,
                                      const unsigned int block_size)
    {
    // query the kernel's hardware block-size limit once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_merge_particles_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    int run_block_size = min(block_size,max_block_size);

    // launch one thread per leaf with the clamped block size
    // (bug fix: the launch previously used the raw block_size while the grid was
    // sized with run_block_size, so a request above maxThreadsPerBlock would fail
    // to launch and otherwise grid/block sizing disagreed)
    gpu_nlist_merge_particles_kernel<<<nleafs/run_block_size + 1, run_block_size>>>(d_tree_aabbs,
                                                                                    d_morton_codes_red,
                                                                                    d_tree_parent_sib,
                                                                                    d_morton_types,
                                                                                    d_pos,
                                                                                    d_num_per_type,
                                                                                    ntypes,
                                                                                    d_map_tree_pid,
                                                                                    d_leaf_offset,
                                                                                    d_type_head,
                                                                                    Ntot,
                                                                                    nleafs);
    return cudaSuccess;
    }
//! Computes the longest common prefix between Morton codes
/*!
* \param d_morton_codes Array of Morton codes
* \param i First Morton code index
* \param j Second Morton code index
* \param min_idx The smallest index considered "in range" (inclusive)
* \param max_idx The last index considered "in range" (inclusive)
*
* \returns number of bits shared between the Morton codes of i and j
*
* delta(i,j) is defined as the largest number of bits shared between Morton codes i and j. When the Morton codes are
* sorted, this implies delta(i',j') >= delta(i,j) for any i',j' in [i,j]. If i and j lie outside
* of the range of Morton codes corresponding to this tree, then it always returns -1. If the Morton codes for i and j
* are identical, then the longest prefix of i and j is used as a tie breaker.
*/
__device__ inline int delta(const uint32_t *d_morton_codes,
                            unsigned int i,
                            unsigned int j,
                            int min_idx,
                            int max_idx)
    {
    // NOTE(review): j is unsigned, so a caller passing idx-1 when idx == 0 wraps to
    // UINT_MAX and is rejected by "j > max_idx" (the signed bounds are promoted to
    // unsigned in these comparisons) -- this wraparound appears intentional; confirm
    // that min_idx/max_idx are always non-negative
    if (j > max_idx || j < min_idx)
        {
        return -1;
        }

    uint32_t first_code = d_morton_codes[i];
    uint32_t last_code = d_morton_codes[j];

    // if codes match, then use index as tie breaker
    // the number of shared bits is equal to the 32 bits in the integer, plus the number of bits shared between the
    // indexes (offset from the start of the node range to make things simpler)
    if (first_code == last_code)
        {
        return (32 + __clz((i-min_idx) ^ (j-min_idx)));
        }
    else
        {
        return __clz(first_code ^ last_code);
        }
    }
//! Determines the range of Morton codes that a node covers
/*!
* \param d_morton_codes Array of Morton codes
* \param min_idx The smallest Morton code index considered "in range" (inclusive)
* \param max_idx The last Morton code index considered "in range" (inclusive)
* \param idx Current node (Morton code) index
*
* \returns the minimum and maximum leafs covered by this node
* \note This is a literal implementation of the Karras pseudocode, with no optimizations or refinement.
* Tero Karras, "Maximizing parallelism in the construction of BVHs, octrees, and k-d trees",
* High Performance Graphics (2012).
*/
__device__ inline uint2 determineRange(const uint32_t *d_morton_codes,
                                       const int min_idx,
                                       const int max_idx,
                                       const int idx)
    {
    // compare the common-prefix lengths with both neighbors to decide which way the range extends
    int forward_prefix = delta(d_morton_codes, idx, idx+1, min_idx, max_idx);
    int backward_prefix = delta(d_morton_codes, idx, idx-1, min_idx, max_idx);

    // get direction of the range based on sign
    int d = ((forward_prefix - backward_prefix) > 0) ? 1 : -1;

    // get minimum prefix: any index inside the node's range must share a longer prefix than this
    int min_prefix = delta(d_morton_codes, idx, idx-d, min_idx, max_idx);

    // get maximum prefix by binary search: first grow an upper bound exponentially...
    int lmax = 2;
    while( delta(d_morton_codes, idx, idx + d*lmax, min_idx, max_idx) > min_prefix)
        {
        lmax = lmax << 1;
        }

    // ...then bisect down to the exact range length
    unsigned int len = 0;
    unsigned int step = lmax;
    do
        {
        step = step >> 1;
        unsigned int new_len = len + step;
        if (delta(d_morton_codes, idx, idx + d*new_len, min_idx, max_idx) > min_prefix)
            len = new_len;
        }
    while (step > 1);

    // order range based on direction so that range.x <= range.y
    uint2 range;
    if (d > 0)
        {
        range.x = idx;
        range.y = idx + len;
        }
    else
        {
        range.x = idx - len;
        range.y = idx;
        }
    return range;
    }
//! Finds the split position in Morton codes covered by a range
/*!
* \param d_morton_codes Array of Morton codes
* \param first First leaf node in the range
* \param last Last leaf node in the range
*
* \returns the leaf index corresponding to the split in Morton codes
* See determineRange for original source of algorithm.
*/
__device__ inline unsigned int findSplit(const uint32_t *d_morton_codes,
                                         const unsigned int first,
                                         const unsigned int last)
    {
    uint32_t first_code = d_morton_codes[first];
    uint32_t last_code = d_morton_codes[last];

    // if codes match, then just split evenly
    if (first_code == last_code)
        return (first + last) >> 1;

    // get the length of the common prefix shared by the whole range
    int common_prefix = __clz(first_code ^ last_code);

    // assume split starts at first, and begin binary search for the highest index
    // whose code still shares more than common_prefix bits with first_code
    unsigned int split = first;
    unsigned int step = last - first;
    do
        {
        // exponential decrease (is factor of 2 best?)
        step = (step + 1) >> 1;
        unsigned int new_split = split + step;

        // if proposed split lies within range
        if (new_split < last)
            {
            unsigned int split_code = d_morton_codes[new_split];
            int split_prefix = __clz(first_code ^ split_code);

            // if new split shares a longer number of bits, accept it
            if (split_prefix > common_prefix)
                {
                split = new_split;
                }
            }
        }
    while (step > 1);

    return split;
    }
//! Kernel to generate the parent-child-sibling relationships between nodes
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
*
* \b Implementation
* One thread is called per internal node in a single kernel launch. Each thread first determines its "local" index
* as an internal node within a tree based on the number of leafs per tree. The range of leafs covered by the internal
* node is determined, and then its split position is identified. The split identifies the children of the node as
* another internal node or as a leaf node.
*
* The parent and sibling of each child node is saved. The sibling id is bit shifted so as to use a single bit to encode
* the sibling as a right child or left child (after shifting, we set the bit to 1 if the sibling is a right child).
* If the child is a root node, it also saves information for itself (since no other node ever identifies a root as a
* child node).
*/
__global__ void gpu_nlist_gen_hierarchy_kernel(uint2 *d_tree_parent_sib,
                                               const uint32_t *d_morton_codes,
                                               const unsigned int *d_num_per_type,
                                               const unsigned int ntypes,
                                               const unsigned int nleafs,
                                               const unsigned int ninternal)
    {
    // compute the internal node index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per internal node
    if (idx >= ninternal)
        return;

    // get what type of leaf I am: walk the per-type trees (each type with n leafs
    // contributes n-1 internal nodes) until idx falls inside one of them
    unsigned int min_idx = 0; // the "0" of the leaf node array
    unsigned int max_idx = 0; // the "N-1" of the leaf node array

    unsigned int node_idx = idx;
    unsigned int origin = 0;
    unsigned int end = 0;

    unsigned int cur_type=0;
    unsigned int active_types=0;
    for (cur_type=0; cur_type < ntypes; ++cur_type)
        {
        // current min index is the previous max index
        min_idx = max_idx;
        // max index adds the number of internal nodes in this type (nleaf - 1)
        const unsigned int cur_nleaf = (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
        if (cur_nleaf > 0)
            {
            max_idx += cur_nleaf-1;
            ++active_types;
            }

        // we break the loop if we are in range
        if (idx < max_idx)
            {
            // decrement by 1 to get this back into the number we really need
            --active_types;

            // now, we repurpose the min and max index to now correspond to the *leaf* index.
            // the min index is the minimum *leaf* index
            origin = min_idx + active_types;
            end = max_idx + active_types;
            node_idx += active_types;
            break;
            }
        }

    // enact the magical split determining: find the leaf range this node covers and where it splits
    uint2 range = determineRange(d_morton_codes, origin, end, node_idx);
    unsigned int first = range.x;
    unsigned int last = range.y;
    unsigned int split = findSplit(d_morton_codes, first, last);

    uint2 children;
    // set the children, shifting ahead by nleafs - cur_type to account for leaf shifting
    // this factor comes out from resetting 0 = N_leaf,i each time, and then remapping this to
    // an internal node
    // (a child equal to the range boundary is itself a leaf; otherwise it is an internal node)
    children.x = (split == first) ? split : (nleafs - active_types + split);
    children.y = ((split + 1) == last) ? (split + 1) : nleafs - active_types + split + 1;

    uint2 parent_sib;
    parent_sib.x = nleafs + idx;

    // encode the sibling as the right child (low bit set marks "sibling is a right child")
    parent_sib.y = children.y << 1;
    parent_sib.y |= 1;

    d_tree_parent_sib[children.x] = parent_sib;

    // encode the sibling as the left child (low bit clear)
    parent_sib.y = children.x << 1;
    d_tree_parent_sib[children.y] = parent_sib;

    // root is always number "zero", but only it can set its parent / sibling
    // we mark both of these as the root for traversing, since only the root node
    // will be its own sibling
    if (node_idx == origin)
        {
        parent_sib.x = nleafs + idx;
        parent_sib.y = (nleafs + idx) << 1;

        d_tree_parent_sib[nleafs + idx] = parent_sib;
        }
    }
/*!
* \param d_tree_parent_sib Parent and sibling for each node in the tree
* \param d_morton_codes Morton codes for each leaf node
* \param d_num_per_type Number of particles per type
* \param ntypes Number of types
* \param nleafs Number of leafs
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_gen_hierarchy(uint2 *d_tree_parent_sib,
                                    const uint32_t *d_morton_codes,
                                    const unsigned int *d_num_per_type,
                                    const unsigned int ntypes,
                                    const unsigned int nleafs,
                                    const unsigned int ninternal,
                                    const unsigned int block_size)
    {
    // look up the kernel's hardware block-size limit only on the first call
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_gen_hierarchy_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    const int run_block_size = min(block_size, max_block_size);

    // one thread per internal node
    const unsigned int nblocks = ninternal / run_block_size + 1;
    gpu_nlist_gen_hierarchy_kernel<<<nblocks, run_block_size>>>(d_tree_parent_sib,
                                                                d_morton_codes,
                                                                d_num_per_type,
                                                                ntypes,
                                                                nleafs,
                                                                ninternal);
    return cudaSuccess;
    }
//! Kernel to bubble up enclosing AABBs to internal nodes from leaf nodes
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
*
* \b Implementation
* One thread is called per leaf node. The second thread to reach an internal node processes its two children,
* which guarantees that no node AABB is prematurely processed. The arrival order at a node is controlled by an atomic
* thread lock in global memory. This locking could be accelerated by using shared memory whenever a node is being
* processed by threads in the same block.
*
* When processing the node, the thread also walks up the tree to find the "rope" that tells a traverser
* how to navigate the tree. If a query AABB intersects the current node, then the traverser always moves the the left
* child of the current node. If the AABB does not intersect, it moves along the "rope" to the next portion of the tree.
* The "rope" is calculated by walking back up the tree to find the earliest ancestor that is a left child of its
* parent. The rope then goes to that ancestor's sibling. If the root node is reached, then the rope is set to -1 to
* indicate traversal should be aborted.
*
* This kernel also encodes the left child of a node into the AABB for internal nodes. The thread processing the node
* checks if it arrived from a left child or right child of the node it is processing, and sets the left child of that
* parent accordingly. A child is indicated by bit shifting, and setting the first bit to 1.
*/
__global__ void gpu_nlist_bubble_aabbs_kernel(unsigned int *d_node_locks,
                                              Scalar4 *d_tree_aabbs,
                                              const uint2 *d_tree_parent_sib,
                                              const unsigned int ntypes,
                                              const unsigned int nleafs)
    {
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per leaf node
    if (idx >= nleafs)
        return;

    // okay, first we start from the leaf and set my bounding box
    Scalar4 cur_upper = d_tree_aabbs[2*idx];
    Scalar4 cur_lower = d_tree_aabbs[2*idx+1];

    // zero the counters for internal nodes
    cur_upper.w = 0.0f;
    cur_lower.w = 0.0f;

    unsigned int cur_node = idx;
    unsigned int lock_key = 0;
    do
        {
        uint2 cur_parent_sib = d_tree_parent_sib[cur_node];
        unsigned int cur_parent = cur_parent_sib.x;

        // if the current sibling is a right child, then the current node is a left child
        bool cur_is_left = (cur_parent_sib.y & 1);

        unsigned int cur_sibling = cur_parent_sib.y >> 1;

        // first we compute the skip ("rope") for this node always
        // back track up the tree until you find a left child
        // we have a check in place so that we don't stall on the root node
        // (the root is its own sibling, which terminates the walk)
        uint2 backtrack = cur_parent_sib;
        while (!(backtrack.y & 1) && backtrack.x != (backtrack.y >> 1))
            {
            backtrack = d_tree_parent_sib[backtrack.x];
            }

        // then, the skip is to the sibling of that node, or else to quit (-1 aborts traversal)
        if (backtrack.y & 1)
            {
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(backtrack.y >> 1);
            }
        else
            {
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(-1);
            }

        // then, we do an atomicAdd on the lock to see if we need to process the parent AABBs
        // check to make sure the parent is bigger than nleafs, or else the node lock always fails
        // so that we terminate the thread
        // (the second thread to arrive sees lock_key == 1; by then the sibling AABB is complete)
        lock_key = (cur_parent >= nleafs) ? atomicAdd(d_node_locks + cur_parent - nleafs, 1) : 0;

        // process the node
        if (lock_key == 1)
            {
            // compute the max upper bound
            Scalar4 sib_upper = d_tree_aabbs[2*cur_sibling];
            if (sib_upper.x > cur_upper.x) cur_upper.x = sib_upper.x;
            if (sib_upper.y > cur_upper.y) cur_upper.y = sib_upper.y;
            if (sib_upper.z > cur_upper.z) cur_upper.z = sib_upper.z;
            d_tree_aabbs[2*cur_parent] = cur_upper;

            // compute the min lower bound
            Scalar4 sib_lower = d_tree_aabbs[2*cur_sibling+1];
            if (sib_lower.x < cur_lower.x) cur_lower.x = sib_lower.x;
            if (sib_lower.y < cur_lower.y) cur_lower.y = sib_lower.y;
            if (sib_lower.z < cur_lower.z) cur_lower.z = sib_lower.z;

            // this must always be some internal node, so stash the left child of this node here
            // (shifted with low bit set to mark the value as a child index, not a particle count)
            unsigned int left_child_masked = ((cur_is_left ? cur_node : cur_sibling) << 1) | 1;
            cur_lower.w = __int_as_scalar( left_child_masked );

            d_tree_aabbs[2*cur_parent+1] = cur_lower;

            // bump the current node one level
            cur_node = cur_parent;
            }
        }
    while (lock_key == 1);
    }
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
* \param d_tree_aabbs AABB array for all tree nodes
* \param d_tree_parent_sib Parent and sibling indexes of each node
* \param ntypes Number of particle types
* \param nleafs Number of leaf nodes
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_bubble_aabbs(unsigned int *d_node_locks,
                                   Scalar4 *d_tree_aabbs,
                                   const uint2 *d_tree_parent_sib,
                                   const unsigned int ntypes,
                                   const unsigned int nleafs,
                                   const unsigned int ninternal,
                                   const unsigned int block_size)
    {
    // zero the per-internal-node arrival counters used as locks by the kernel
    cudaMemset(d_node_locks, 0, sizeof(unsigned int)*ninternal);

    // clamp the requested block size to the kernel's limit, consistent with the
    // other launch wrappers in this file (previously an oversized request would
    // make the launch fail silently)
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_bubble_aabbs_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    const unsigned int run_block_size = min(block_size, max_block_size);

    // one thread per leaf node
    gpu_nlist_bubble_aabbs_kernel<<<nleafs/run_block_size + 1, run_block_size>>>(d_node_locks,
                                                                                 d_tree_aabbs,
                                                                                 d_tree_parent_sib,
                                                                                 ntypes,
                                                                                 nleafs);

    return cudaSuccess;
    }
//! Kernel to rearrange particle data into leaf order for faster traversal
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
*
* \b Implementation
* One thread per particle is called. Writes are coalesced by writing in leaf order, and reading in a scattered way.
*/
__global__ void gpu_nlist_move_particles_kernel(Scalar4 *d_leaf_xyzf,
                                                Scalar2 *d_leaf_db,
                                                const Scalar4 *d_pos,
                                                const Scalar *d_diameter,
                                                const unsigned int *d_body,
                                                const unsigned int *d_map_tree_pid,
                                                const unsigned int Ntot)
    {
    // leaf-ordered index handled by this thread
    const unsigned int leaf_idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per particle
    if (leaf_idx >= Ntot)
        return;

    // gather from ParticleData order and write in leaf order (coalesced stores),
    // stashing the original particle id in the w component
    const unsigned int pidx = d_map_tree_pid[leaf_idx];
    const Scalar4 postype = d_pos[pidx];
    d_leaf_xyzf[leaf_idx] = make_scalar4(postype.x, postype.y, postype.z, __int_as_scalar(pidx));
    d_leaf_db[leaf_idx] = make_scalar2(d_diameter[pidx], __int_as_scalar(d_body[pidx]));
    }
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
* \param d_leaf_db Particle diameter and body id in leaf order
* \param d_pos Particle positions
* \param d_diameter Particle diameters
* \param d_body Particle body ids
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param Ntot Number of particles owned by this rank
* \param block_size Requested thread block size
*
* \returns cudaSuccess on completion
*/
cudaError_t gpu_nlist_move_particles(Scalar4 *d_leaf_xyzf,
                                     Scalar2 *d_leaf_db,
                                     const Scalar4 *d_pos,
                                     const Scalar *d_diameter,
                                     const unsigned int *d_body,
                                     const unsigned int *d_map_tree_pid,
                                     const unsigned int Ntot,
                                     const unsigned int block_size)
    {
    // cache the kernel's hardware block-size limit across calls
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_move_particles_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    // clamp the requested block size and launch one thread per particle
    const int run_block_size = min(block_size, max_block_size);
    const unsigned int nblocks = Ntot / run_block_size + 1;
    gpu_nlist_move_particles_kernel<<<nblocks, run_block_size>>>(d_leaf_xyzf,
                                                                 d_leaf_db,
                                                                 d_pos,
                                                                 d_diameter,
                                                                 d_body,
                                                                 d_map_tree_pid,
                                                                 Ntot);
    return cudaSuccess;
    }
//! Kernel for traversing tree to generate neighbor list
/*!
* \param d_nlist Neighbor list for writing
* \param d_n_neigh Number of neighbors per particle
* \param d_last_updated_pos Records current particle positions
* \param d_conditions Store overflow condition by type
* \param d_Nmax Maximum number of neighbors allocated by type
* \param d_head_list Indexes for writing into neighbor list
* \param N Number of particles
* \param nghosts Number of ghost particles
* \param d_map_tree_pid Map leaf index to local particle index
* \param d_leaf_offset Offset for reading leaf particles by type
* \param d_tree_roots Index for tree root by type
* \param d_tree_aabbs Tree AABBs
* \param nleafs Total number of leafs
* \param d_leaf_xyzf Leaf position-id array
* \param d_leaf_db Leaf diameter-body array
* \param d_pos Particle positions
* \param d_image_list Translation vectors to check for traversal
* \param nimages Number of translation vectors to check
* \param d_r_cut Cutoff radius by type r_cut(i,j)
* \param r_buff Buffer around cutoff radius
* \param max_diam Maximum diameter attained by a particle for diameter shifting
* \param ntypes Number of particle types
*
* \b Implementation
* One thread is launched per particle, but the threads operate on particles in leaf order rather than ParticleData
* order in order to minimize divergence within a warp (particles in the same leaf should intersect similar parts of the
* tree). Each thread iterates on the particle types (trees) and queries on all translation vectors using a stackless
* search. When the query AABB intersects a node AABB, the node AABB is checked to be an internal node or a leaf node.
* If an internal node, then the traversal advances to that node's left child. If a leaf node, the leaf particles are
* tested directly to be included in the neighbor list. The node then advances along that leaf node's rope. If the AABB
* is not intersected, the traversal advances along the rope. This process proceeds until a rope signals that the
* traversal is complete.
*/
template<unsigned char flags>
__global__ void gpu_nlist_traverse_tree_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const unsigned int N,
const unsigned int nghosts,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_tree_roots,
const Scalar4 *d_tree_aabbs,
const unsigned int nleafs,
const Scalar4 *d_leaf_xyzf,
const Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar3 *d_image_list,
const unsigned int nimages,
const Scalar *d_r_cut,
const Scalar r_buff,
const Scalar max_diam,
const unsigned int ntypes)
{
// decode the compile-time options: bit 0 = body filtering, bit 1 = diameter shifting
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
const Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
// shared memory layout: [ r_list (num_typ_parameters Scalars) | Nmax (ntypes uints) | leaf_offset (ntypes uints) ]
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
unsigned int *s_leaf_offset = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters + sizeof(unsigned int)*ntypes]);
// load in the per type pair r_list
// cooperative strided load: the whole block fills shared memory in blockDim.x chunks
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
s_leaf_offset[cur_offset + threadIdx.x] = d_leaf_offset[cur_offset + threadIdx.x];
}
}
__syncthreads();
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the leaf list
if (idx >= (N+nghosts))
return;
// read in the current position
unsigned int my_pidx = d_map_tree_pid[idx];
// we only process particles owned by this processor for neighbors
if (my_pidx >= N)
return;
const Scalar4 postype_i = texFetchScalar4(d_pos, pdata_pos_tex, my_pidx);
const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z);
const unsigned int type_i = __scalar_as_int(postype_i.w);
// fetch the diameter and body out of the leaf texture since it's bound anyway
const Scalar2 db_i = texFetchScalar2(d_leaf_db, leaf_db_tex, idx);
const Scalar diam_i = db_i.x;
const unsigned int body_i = __scalar_as_int(db_i.y);
const unsigned int nlist_head_i = texFetchUint(d_head_list, head_list_tex, my_pidx);
unsigned int n_neigh_i = 0;
// one tree is stored per particle type; query each one that has a nonzero cutoff
for (unsigned int cur_pair_type=0; cur_pair_type < ntypes; ++cur_pair_type)
{
// Check primary box
const Scalar r_cut_i = s_r_list[typpair_idx(type_i,cur_pair_type)];
// Skip this tree type if it is not needed
if (r_cut_i <= Scalar(0.0))
continue;
// stash the r_cutsq before any diameter shifting
const Scalar r_cutsq_i = r_cut_i*r_cut_i;
// the rlist to use for the AABB search has to be at least as big as the biggest diameter
Scalar r_list_i = r_cut_i;
if (diameter_shift)
r_list_i += max_diam - Scalar(1.0);
const unsigned int cur_tree_root = d_tree_roots[cur_pair_type];
// skip this type if we don't have it
if (cur_tree_root == NLIST_GPU_INVALID_NODE)
continue;
// repeat the query once per periodic image translation vector
for (unsigned int cur_image = 0; cur_image < nimages; ++cur_image)
{
const Scalar3 pos_i_image = pos_i + d_image_list[cur_image];
// query AABB centered on the translated particle with half-width r_list_i
const Scalar3 aabb_upper = make_scalar3(pos_i_image.x + r_list_i,
pos_i_image.y + r_list_i,
pos_i_image.z + r_list_i);
const Scalar3 aabb_lower = make_scalar3(pos_i_image.x - r_list_i,
pos_i_image.y - r_list_i,
pos_i_image.z - r_list_i);
// stackless search
int cur_node_idx = cur_tree_root;
while (cur_node_idx > -1)
{
// node AABB is packed into two Scalar4s: (upper.xyz, rope) and (lower.xyz, np/child mask)
const Scalar4 upper_rope = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx);
const Scalar4 lower_np = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx+1);
// AABB-AABB overlap test: no overlap iff the boxes are separated along some axis
if (!(aabb_upper.x < lower_np.x
|| aabb_lower.x > upper_rope.x
|| aabb_upper.y < lower_np.y
|| aabb_lower.y > upper_rope.y
|| aabb_upper.z < lower_np.z
|| aabb_lower.z > upper_rope.z))
{
// low bit marks an internal node; remaining bits hold the leaf particle count or the left child index
const unsigned int np_child_masked = __scalar_as_int(lower_np.w);
if(!(np_child_masked & 1))
{
// leaf node
// all leaves must have at least 1 particle, so we can use this to decide
const unsigned int node_head = NLIST_GPU_PARTICLES_PER_LEAF*cur_node_idx - s_leaf_offset[cur_pair_type];
const unsigned int n_part = np_child_masked >> 1;
for (unsigned int cur_p = node_head; cur_p < node_head + n_part; ++cur_p)
{
// neighbor j
const Scalar4 cur_xyzf = texFetchScalar4(d_leaf_xyzf, leaf_xyzf_tex, cur_p);
const Scalar3 pos_j = make_scalar3(cur_xyzf.x, cur_xyzf.y, cur_xyzf.z);
const unsigned int j = __scalar_as_int(cur_xyzf.w);
const Scalar2 cur_db = texFetchScalar2(d_leaf_db, leaf_db_tex, cur_p);
const Scalar diam_j = cur_db.x;
const unsigned int body_j = __scalar_as_int(cur_db.y);
// exclude self-interaction, and optionally particles in the same rigid body
bool excluded = (my_pidx == j);
if (filter_body && body_i != 0xffffffff)
excluded = excluded | (body_i == body_j);
if (!excluded)
{
// now we can trim down the actual particles based on diameter
// compute the shift for the cutoff if not excluded
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (diam_i + diam_j) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_cut_i) * delta;
}
// compute distance and wrap back into box
Scalar3 drij = pos_j - pos_i_image;
Scalar dr2 = dot(drij,drij);
if (dr2 <= (r_cutsq_i + sqshift))
{
// only write while there is room; keep counting so overflow can be reported below
if (n_neigh_i < s_Nmax[type_i])
{
d_nlist[nlist_head_i + n_neigh_i] = j;
}
++n_neigh_i;
}
}
}
// leaf nodes always move to their rope
cur_node_idx = __scalar_as_int(upper_rope.w);
}
else
{
// internal node, take left child
cur_node_idx = (np_child_masked >> 1);
}
}
else
{
cur_node_idx = __scalar_as_int(upper_rope.w); // no overlap, rope ahead
}
} // end stackless search
} // end loop over images
} // end loop over pair types
// could try reordering by idx instead of pidx, but that seems to not make much difference in microbenchmarking.
d_n_neigh[my_pidx] = n_neigh_i;
// NOTE(review): __scalar_as_int(type_i) bit-casts a Scalar to int before it is converted back to a
// Scalar by make_scalar4; __int_as_scalar(type_i) looks like the intended round-trip for storing the
// type in .w. Appears benign if readers of d_last_updated_pos only use .xyz -- confirm against callers.
d_last_updated_pos[my_pidx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __scalar_as_int(type_i));
// update the number of neighbors for this type if allocated memory is exceeded
if (n_neigh_i >= s_Nmax[type_i])
atomicMax(&d_conditions[type_i], n_neigh_i);
}
//! Clamps the block size and launches one compile-time instantiation of the traversal kernel
/*!
 * \tparam flags Bitmask of traversal options: bit 0 enables body filtering, bit 1 enables diameter shifting
 *
 * All parameters are forwarded unchanged to gpu_nlist_traverse_tree_kernel; see
 * gpu_nlist_traverse_tree below for their documentation. The kernel's maximum thread-block size is
 * queried once per template instantiation and cached in a function-local static, matching the
 * previous per-branch caching behavior.
 */
template<unsigned char flags>
static void gpu_nlist_traverse_tree_launch(unsigned int *d_nlist,
                                           unsigned int *d_n_neigh,
                                           Scalar4 *d_last_updated_pos,
                                           unsigned int *d_conditions,
                                           const unsigned int *d_Nmax,
                                           const unsigned int *d_head_list,
                                           const unsigned int N,
                                           const unsigned int nghosts,
                                           const unsigned int *d_map_tree_pid,
                                           const unsigned int *d_leaf_offset,
                                           const unsigned int *d_tree_roots,
                                           const Scalar4 *d_tree_aabbs,
                                           const unsigned int nleafs,
                                           const Scalar4 *d_leaf_xyzf,
                                           const Scalar2 *d_leaf_db,
                                           const Scalar4 *d_pos,
                                           const Scalar3 *d_image_list,
                                           const unsigned int nimages,
                                           const Scalar *d_r_cut,
                                           const Scalar r_buff,
                                           const Scalar max_diam,
                                           const unsigned int ntypes,
                                           const unsigned int block_size,
                                           const unsigned int shared_size)
    {
    // cached once per instantiation: each flag combination is a distinct kernel
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<flags>);
        max_block_size = attr.maxThreadsPerBlock;
        }
    int run_block_size = min(block_size,max_block_size);
    // one thread per particle (local + ghost); the +1 block covers the remainder
    int nblocks = (N+nghosts)/run_block_size + 1;
    gpu_nlist_traverse_tree_kernel<flags><<<nblocks, run_block_size, shared_size>>>(d_nlist,
        d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, N, nghosts,
        d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs, d_leaf_xyzf,
        d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes);
    }
/*!
 * \param d_nlist Neighbor list for writing
 * \param d_n_neigh Number of neighbors per particle
 * \param d_last_updated_pos Records current particle positions
 * \param d_conditions Store overflow condition by type
 * \param d_Nmax Maximum number of neighbors allocated by type
 * \param d_head_list Indexes for writing into neighbor list
 * \param N Number of particles
 * \param nghosts Number of ghost particles
 * \param d_map_tree_pid Map leaf index to local particle index
 * \param d_leaf_offset Offset for reading leaf particles by type
 * \param d_tree_roots Index for tree root by type
 * \param d_tree_aabbs Tree AABBs
 * \param nleafs Total number of leafs
 * \param ninternal Total number of internal nodes (unused here; kept for interface compatibility)
 * \param nnodes Total number of tree nodes, used to size the AABB texture binding
 * \param d_leaf_xyzf Leaf position-id array
 * \param d_leaf_db Leaf diameter-body array
 * \param d_pos Particle positions
 * \param d_image_list Translation vectors to check for traversal
 * \param nimages Number of translation vectors to check
 * \param d_r_cut Cutoff radius by type r_cut(i,j)
 * \param r_buff Buffer around cutoff radius
 * \param max_diam Maximum diameter attained by a particle for diameter shifting
 * \param ntypes Number of particle types
 * \param filter_body True if body filtering is enabled
 * \param diameter_shift True if rcut(i,j) should be shifted by the particle diameters
 * \param compute_capability Compute capability of the GPU (in 20, 30, 35 format)
 * \param block_size Requested thread block size
 *
 * \returns cudaSuccess on completion
 * \returns cudaError on failure to texture bind
 *
 * \note Kernel calls are templated on body filtering and diameter shifting for optimization.
 * \note One thread is called for all leaf particles. Some of these threads will die because they correspond to ghost
 * particles not owned by the rank. Because the leaf particles are sorted, there is no easy way to skip these
 * particles, and this inefficiency is assumed to be relatively small.
 */
cudaError_t gpu_nlist_traverse_tree(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const unsigned int N,
const unsigned int nghosts,
const unsigned int *d_map_tree_pid,
const unsigned int *d_leaf_offset,
const unsigned int *d_tree_roots,
const Scalar4 *d_tree_aabbs,
const unsigned int nleafs,
const unsigned int ninternal,
const unsigned int nnodes,
const Scalar4 *d_leaf_xyzf,
const Scalar2 *d_leaf_db,
const Scalar4 *d_pos,
const Scalar3 *d_image_list,
const unsigned int nimages,
const Scalar *d_r_cut,
const Scalar r_buff,
const Scalar max_diam,
const unsigned int ntypes,
bool filter_body,
bool diameter_shift,
const unsigned int compute_capability,
const unsigned int block_size)
{
// shared memory = r_list + Nmax + leaf_offset (see kernel's shared memory layout)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + 2*sizeof(unsigned int)*ntypes;
// bind the neighborlist texture on devices without a read-only data cache
if (compute_capability < 35)
{
pdata_pos_tex.normalized = false;
pdata_pos_tex.filterMode = cudaFilterModePoint;
cudaError_t error = cudaBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*(N+nghosts));
if (error != cudaSuccess)
return error;
leaf_xyzf_tex.normalized = false;
leaf_xyzf_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, leaf_xyzf_tex, d_leaf_xyzf, sizeof(Scalar4)*(N+nghosts));
if (error != cudaSuccess)
return error;
leaf_db_tex.normalized = false;
leaf_db_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, leaf_db_tex, d_leaf_db, sizeof(Scalar2)*(N+nghosts));
if (error != cudaSuccess)
return error;
aabb_node_bounds_tex.normalized = false;
aabb_node_bounds_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, aabb_node_bounds_tex, d_tree_aabbs, sizeof(Scalar4)*2*nnodes);
if (error != cudaSuccess)
return error;
head_list_tex.normalized = false;
head_list_tex.filterMode = cudaFilterModePoint;
error = cudaBindTexture(0, head_list_tex, d_head_list, sizeof(unsigned int)*N);
if (error != cudaSuccess)
return error;
}
// map the runtime options onto the matching compile-time kernel instantiation
// (bit 0 = body filtering, bit 1 = diameter shifting)
const unsigned char flags = (filter_body ? 1 : 0) | (diameter_shift ? 2 : 0);
switch (flags)
{
case 0:
gpu_nlist_traverse_tree_launch<0>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax,
d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes,
block_size, shared_size);
break;
case 1:
gpu_nlist_traverse_tree_launch<1>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax,
d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes,
block_size, shared_size);
break;
case 2:
gpu_nlist_traverse_tree_launch<2>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax,
d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes,
block_size, shared_size);
break;
default:
gpu_nlist_traverse_tree_launch<3>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax,
d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs,
d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes,
block_size, shared_size);
break;
}
// unbind the textures
if (compute_capability < 35)
{
cudaError_t error = cudaUnbindTexture(pdata_pos_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(leaf_xyzf_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(leaf_db_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(aabb_node_bounds_tex);
if (error != cudaSuccess)
return error;
error = cudaUnbindTexture(head_list_tex);
if (error != cudaSuccess)
return error;
}
return cudaSuccess;
}
//! Kernel to find divisons between particle types in sorted order
/*!
* \param d_type_head Index to first type in leaf ordered particles by type
* \param d_pos Particle positions
* \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
* \param N Total number of particles on rank (including ghosts)
*
* The starting index for each type of particles is the first particle where the left neighbor is not of the same type.
*/
__global__ void gpu_nlist_get_divisions_kernel(unsigned int *d_type_head,
const Scalar4 *d_pos,
const unsigned int *d_map_tree_pid,
const unsigned int N)
{
// one thread per leaf-ordered particle
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
// type of the particle this thread owns (leaf order -> ParticleData index -> type)
const unsigned int cur_pidx = d_map_tree_pid[idx];
const unsigned int cur_type = __scalar_as_int(d_pos[cur_pidx].w);
// the first particle always begins a run; every other particle compares to its left neighbor
bool starts_run = (idx == 0);
if (!starts_run)
{
const unsigned int left_pidx = d_map_tree_pid[idx - 1];
const unsigned int left_type = __scalar_as_int(d_pos[left_pidx].w);
starts_run = (left_type != cur_type);
}
// store idx+1 so that 0 can mean "no particles of this type were found"
if (starts_run)
{
d_type_head[cur_type] = idx + 1;
}
}
/*!
 * \param d_type_head Index to first type in leaf ordered particles by type
 * \param d_pos Particles positions
 * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
 * \param N Total number of particles on rank (including ghosts)
 * \param ntypes Number of types
 * \param block_size Requested thread block size
 *
 * \returns cudaSuccess on completion, or the first CUDA error encountered
 */
cudaError_t gpu_nlist_init_count(unsigned int *d_type_head,
const Scalar4 *d_pos,
const unsigned int *d_map_tree_pid,
const unsigned int N,
const unsigned int ntypes,
const unsigned int block_size)
{
// clamp the requested block size to the kernel's maximum (queried once and cached)
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaError_t error = cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_get_divisions_kernel);
if (error != cudaSuccess)
return error;
max_block_size = attr.maxThreadsPerBlock;
}
int run_block_size = min(block_size,max_block_size);
// zero out the head list: 0 signals "no particles of this type" (kernel writes idx+1)
cudaError_t error = cudaMemset(d_type_head, 0, sizeof(unsigned int)*ntypes);
if (error != cudaSuccess)
return error;
// get the head list divisions
gpu_nlist_get_divisions_kernel<<<N/run_block_size + 1, run_block_size>>>(d_type_head, d_pos, d_map_tree_pid, N);
// surface launch-configuration errors; asynchronous execution errors appear at the next sync
return cudaGetLastError();
}
#undef MORTON_CODE_BITS
#undef MORTON_TYPE_MASK_64
#undef MORTON_CODE_N_BINS
|
4f6a14ead98e2580462189ff03b78e0378a0aa2e.hip | // !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/gather.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestGatherSimple(void)
{
    typedef typename Vector::value_type T;

    // gather pulls src[map[i]] into dst[i]
    Vector map(5);          // gather indices
    Vector src(8);          // source vector
    Vector dst(5, (T) 0);   // destination vector, zero-initialized

    const int map_vals[5] = {6, 2, 1, 7, 2};
    for (int i = 0; i < 5; i++)
        map[i] = map_vals[i];
    for (int i = 0; i < 8; i++)
        src[i] = i;

    thrust::gather(map.begin(), map.end(), src.begin(), dst.begin());

    // each output slot holds the source element its map entry selected
    ASSERT_EQUAL(dst[0], 6);
    ASSERT_EQUAL(dst[1], 2);
    ASSERT_EQUAL(dst[2], 1);
    ASSERT_EQUAL(dst[3], 7);
    ASSERT_EQUAL(dst[4], 2);
}
DECLARE_VECTOR_UNITTEST(TestGatherSimple);
void TestGatherFromDeviceToHost(void)
{
    // source vector on the device: 0,1,...,7
    thrust::device_vector<int> d_src(8);
    thrust::sequence(d_src.begin(), d_src.end(), 0);

    // gather indices, built on the host and mirrored to the device
    thrust::host_vector<int> h_map(5);
    h_map[0] = 6; h_map[1] = 2; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2;
    thrust::device_vector<int> d_map = h_map;

    // destination vector on the host, zero-initialized
    thrust::host_vector<int> h_dst(5, (int) 0);

    // cross-space gather: device map + device source into a host output
    thrust::gather(d_map.begin(), d_map.end(), d_src.begin(), h_dst.begin());

    ASSERT_EQUAL(6, h_dst[0]);
    ASSERT_EQUAL(2, h_dst[1]);
    ASSERT_EQUAL(1, h_dst[2]);
    ASSERT_EQUAL(7, h_dst[3]);
    ASSERT_EQUAL(2, h_dst[4]);
}
DECLARE_UNITTEST(TestGatherFromDeviceToHost);
void TestGatherFromHostToDevice(void)
{
    // source vector on the host: 0,1,...,7
    thrust::host_vector<int> h_src(8);
    thrust::sequence(h_src.begin(), h_src.end(), 0);

    // gather indices (the device mirror is kept to match the sibling test's setup)
    thrust::host_vector<int> h_map(5);
    h_map[0] = 6; h_map[1] = 2; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2;
    thrust::device_vector<int> d_map = h_map;

    // destination vector on the device, zero-initialized
    thrust::device_vector<int> d_dst(5, (int) 0);

    // cross-space gather: host map + host source into a device output
    thrust::gather(h_map.begin(), h_map.end(), h_src.begin(), d_dst.begin());

    ASSERT_EQUAL(6, d_dst[0]);
    ASSERT_EQUAL(2, d_dst[1]);
    ASSERT_EQUAL(1, d_dst[2]);
    ASSERT_EQUAL(7, d_dst[3]);
    ASSERT_EQUAL(2, d_dst[4]);
}
DECLARE_UNITTEST(TestGatherFromHostToDevice);
/*!
 * Randomized gather test: gathers n elements from a small random source on both
 * host and device and checks that the two results agree.
 */
template <typename T>
void TestGather(const size_t n)
{
    // FIX: use std::min instead of the unqualified ::min left behind by hipify;
    // ::min(size_t, size_t) is not a portable host-side overload (the paired .cu uses std::min)
    const size_t source_size = std::min((size_t) 10, 2 * n);
    // source vectors to gather from
    thrust::host_vector<T>   h_source = unittest::random_samples<T>(source_size);
    thrust::device_vector<T> d_source = h_source;
    // gather indices, wrapped into the valid range [0, source_size)
    thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
    for(size_t i = 0; i < n; i++)
        h_map[i] = h_map[i] % source_size;
    thrust::device_vector<unsigned int> d_map = h_map;
    // gather destination
    thrust::host_vector<T>   h_output(n);
    thrust::device_vector<T> d_output(n);
    thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin());
    thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin());
    ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGather);
template <class Vector>
void TestGatherIfSimple(void)
{
    typedef typename Vector::value_type T;

    Vector flg(5); // stencil: only positions 1 and 3 are enabled
    Vector map(5); // gather indices
    Vector src(8); // source vector
    Vector dst(5); // destination vector

    const int flg_vals[5] = {0, 1, 0, 1, 0};
    const int map_vals[5] = {6, 2, 1, 7, 2};
    for (int i = 0; i < 5; i++)
    {
        flg[i] = flg_vals[i];
        map[i] = map_vals[i];
        dst[i] = 0;
    }
    for (int i = 0; i < 8; i++)
        src[i] = i;

    // only slots whose stencil value is nonzero receive gathered elements
    thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin());

    ASSERT_EQUAL(dst[0], 0);
    ASSERT_EQUAL(dst[1], 2);
    ASSERT_EQUAL(dst[2], 0);
    ASSERT_EQUAL(dst[3], 7);
    ASSERT_EQUAL(dst[4], 0);
}
DECLARE_VECTOR_UNITTEST(TestGatherIfSimple);
// Stencil predicate for gather_if: selects entries whose stencil value is even.
template <typename T>
struct is_even_gather_if
{
    __host__ __device__
    bool operator()(const T i) const
    {
        // even <=> remainder after division by two is zero
        return (i % 2) == T(0);
    }
};
template <typename T>
void TestGatherIf(const size_t n)
{
const size_t source_size = ::min((size_t) 10, 2 * n);
// source vectors to gather from
thrust::host_vector<T> h_source = unittest::random_samples<T>(source_size);
thrust::device_vector<T> d_source = h_source;
// gather indices
thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_map[i] = h_map[i] % source_size;
thrust::device_vector<unsigned int> d_map = h_map;
// gather stencil
thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
for(size_t i = 0; i < n; i++)
h_stencil[i] = h_stencil[i] % 2;
thrust::device_vector<unsigned int> d_stencil = h_stencil;
// gather destination
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::gather_if(h_map.begin(), h_map.begin(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>());
thrust::gather_if(d_map.begin(), d_map.begin(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>());
ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIf);
template <typename Vector>
void TestGatherCountingIterator(void)
{
#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && (_DEBUG != 0)
    KNOWN_FAILURE;
#endif

    Vector src(10);
    thrust::sequence(src.begin(), src.end(), 0);

    Vector idx(10);
    thrust::sequence(idx.begin(), idx.end(), 0);

    Vector out(10);

    // gather from a counting iterator (source has any_space_tag)
    thrust::fill(out.begin(), out.end(), 0);
    thrust::gather(idx.begin(),
                   idx.end(),
                   thrust::make_counting_iterator(0),
                   out.begin());
    ASSERT_EQUAL(out, idx);

    // gather with a counting-iterator map (map has any_space_tag)
    thrust::fill(out.begin(), out.end(), 0);
    thrust::gather(thrust::make_counting_iterator(0),
                   thrust::make_counting_iterator((int) src.size()),
                   src.begin(),
                   out.begin());
    ASSERT_EQUAL(out, idx);

    // both map and source are counting iterators
    thrust::fill(out.begin(), out.end(), 0);
    thrust::gather(thrust::make_counting_iterator(0),
                   thrust::make_counting_iterator((int) out.size()),
                   thrust::make_counting_iterator(0),
                   out.begin());
    ASSERT_EQUAL(out, idx);
}
DECLARE_VECTOR_UNITTEST(TestGatherCountingIterator);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
| 4f6a14ead98e2580462189ff03b78e0378a0aa2e.cu | #include <unittest/unittest.h>
#include <thrust/gather.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestGatherSimple(void)
{
    typedef typename Vector::value_type T;

    // gather pulls src[map[i]] into dst[i]
    Vector map(5);          // gather indices
    Vector src(8);          // source vector
    Vector dst(5, (T) 0);   // destination vector, zero-initialized

    const int map_vals[5] = {6, 2, 1, 7, 2};
    for (int i = 0; i < 5; i++)
        map[i] = map_vals[i];
    for (int i = 0; i < 8; i++)
        src[i] = i;

    thrust::gather(map.begin(), map.end(), src.begin(), dst.begin());

    // each output slot holds the source element its map entry selected
    ASSERT_EQUAL(dst[0], 6);
    ASSERT_EQUAL(dst[1], 2);
    ASSERT_EQUAL(dst[2], 1);
    ASSERT_EQUAL(dst[3], 7);
    ASSERT_EQUAL(dst[4], 2);
}
DECLARE_VECTOR_UNITTEST(TestGatherSimple);
void TestGatherFromDeviceToHost(void)
{
    // source vector on the device: 0,1,...,7
    thrust::device_vector<int> d_src(8);
    thrust::sequence(d_src.begin(), d_src.end(), 0);

    // gather indices, built on the host and mirrored to the device
    thrust::host_vector<int> h_map(5);
    h_map[0] = 6; h_map[1] = 2; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2;
    thrust::device_vector<int> d_map = h_map;

    // destination vector on the host, zero-initialized
    thrust::host_vector<int> h_dst(5, (int) 0);

    // cross-space gather: device map + device source into a host output
    thrust::gather(d_map.begin(), d_map.end(), d_src.begin(), h_dst.begin());

    ASSERT_EQUAL(6, h_dst[0]);
    ASSERT_EQUAL(2, h_dst[1]);
    ASSERT_EQUAL(1, h_dst[2]);
    ASSERT_EQUAL(7, h_dst[3]);
    ASSERT_EQUAL(2, h_dst[4]);
}
DECLARE_UNITTEST(TestGatherFromDeviceToHost);
void TestGatherFromHostToDevice(void)
{
    // source vector on the host: 0,1,...,7
    thrust::host_vector<int> h_src(8);
    thrust::sequence(h_src.begin(), h_src.end(), 0);

    // gather indices (the device mirror is kept to match the sibling test's setup)
    thrust::host_vector<int> h_map(5);
    h_map[0] = 6; h_map[1] = 2; h_map[2] = 1; h_map[3] = 7; h_map[4] = 2;
    thrust::device_vector<int> d_map = h_map;

    // destination vector on the device, zero-initialized
    thrust::device_vector<int> d_dst(5, (int) 0);

    // cross-space gather: host map + host source into a device output
    thrust::gather(h_map.begin(), h_map.end(), h_src.begin(), d_dst.begin());

    ASSERT_EQUAL(6, d_dst[0]);
    ASSERT_EQUAL(2, d_dst[1]);
    ASSERT_EQUAL(1, d_dst[2]);
    ASSERT_EQUAL(7, d_dst[3]);
    ASSERT_EQUAL(2, d_dst[4]);
}
DECLARE_UNITTEST(TestGatherFromHostToDevice);
/*!
 * Randomized gather test: gathers n elements from a small random source on both
 * host and device and checks that the two results agree.
 */
template <typename T>
void TestGather(const size_t n)
{
    // keep the source small (at most 10 elements) so map indices exercise repeats
    const size_t source_size = std::min((size_t) 10, 2 * n);

    // source vectors to gather from
    thrust::host_vector<T>   h_source = unittest::random_samples<T>(source_size);
    thrust::device_vector<T> d_source = h_source;

    // gather indices, wrapped into the valid range [0, source_size)
    thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
    for(size_t i = 0; i < n; i++)
        h_map[i] = h_map[i] % source_size;
    thrust::device_vector<unsigned int> d_map = h_map;

    // gather destinations
    thrust::host_vector<T>   h_output(n);
    thrust::device_vector<T> d_output(n);

    thrust::gather(h_map.begin(), h_map.end(), h_source.begin(), h_output.begin());
    thrust::gather(d_map.begin(), d_map.end(), d_source.begin(), d_output.begin());

    ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGather);
template <class Vector>
void TestGatherIfSimple(void)
{
    typedef typename Vector::value_type T;

    Vector flg(5); // stencil: only positions 1 and 3 are enabled
    Vector map(5); // gather indices
    Vector src(8); // source vector
    Vector dst(5); // destination vector

    const int flg_vals[5] = {0, 1, 0, 1, 0};
    const int map_vals[5] = {6, 2, 1, 7, 2};
    for (int i = 0; i < 5; i++)
    {
        flg[i] = flg_vals[i];
        map[i] = map_vals[i];
        dst[i] = 0;
    }
    for (int i = 0; i < 8; i++)
        src[i] = i;

    // only slots whose stencil value is nonzero receive gathered elements
    thrust::gather_if(map.begin(), map.end(), flg.begin(), src.begin(), dst.begin());

    ASSERT_EQUAL(dst[0], 0);
    ASSERT_EQUAL(dst[1], 2);
    ASSERT_EQUAL(dst[2], 0);
    ASSERT_EQUAL(dst[3], 7);
    ASSERT_EQUAL(dst[4], 0);
}
DECLARE_VECTOR_UNITTEST(TestGatherIfSimple);
// Stencil predicate for gather_if: selects entries whose stencil value is even.
template <typename T>
struct is_even_gather_if
{
    __host__ __device__
    bool operator()(const T i) const
    {
        // even <=> remainder after division by two is zero
        return (i % 2) == T(0);
    }
};
/*!
 * Randomized gather_if test: conditionally gathers n elements (even stencil values only)
 * on both host and device and checks that the two results agree.
 */
template <typename T>
void TestGatherIf(const size_t n)
{
    const size_t source_size = std::min((size_t) 10, 2 * n);
    // source vectors to gather from
    thrust::host_vector<T>   h_source = unittest::random_samples<T>(source_size);
    thrust::device_vector<T> d_source = h_source;
    // gather indices, wrapped into the valid range [0, source_size)
    thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n);
    for(size_t i = 0; i < n; i++)
        h_map[i] = h_map[i] % source_size;
    thrust::device_vector<unsigned int> d_map = h_map;
    // gather stencil of 0/1 values for the is_even predicate
    thrust::host_vector<unsigned int> h_stencil = unittest::random_integers<unsigned int>(n);
    for(size_t i = 0; i < n; i++)
        h_stencil[i] = h_stencil[i] % 2;
    thrust::device_vector<unsigned int> d_stencil = h_stencil;
    // gather destination
    thrust::host_vector<T>   h_output(n);
    thrust::device_vector<T> d_output(n);
    // BUG FIX: the map range previously ended at begin() (an empty range), so no elements
    // were ever gathered and the test was vacuous; use the full [begin, end) range
    thrust::gather_if(h_map.begin(), h_map.end(), h_stencil.begin(), h_source.begin(), h_output.begin(), is_even_gather_if<unsigned int>());
    thrust::gather_if(d_map.begin(), d_map.end(), d_stencil.begin(), d_source.begin(), d_output.begin(), is_even_gather_if<unsigned int>());
    ASSERT_EQUAL(h_output, d_output);
}
DECLARE_VARIABLE_UNITTEST(TestGatherIf);
template <typename Vector>
void TestGatherCountingIterator(void)
{
#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && (_DEBUG != 0)
    KNOWN_FAILURE;
#endif

    Vector src(10);
    thrust::sequence(src.begin(), src.end(), 0);

    Vector idx(10);
    thrust::sequence(idx.begin(), idx.end(), 0);

    Vector out(10);

    // gather from a counting iterator (source has any_space_tag)
    thrust::fill(out.begin(), out.end(), 0);
    thrust::gather(idx.begin(),
                   idx.end(),
                   thrust::make_counting_iterator(0),
                   out.begin());
    ASSERT_EQUAL(out, idx);

    // gather with a counting-iterator map (map has any_space_tag)
    thrust::fill(out.begin(), out.end(), 0);
    thrust::gather(thrust::make_counting_iterator(0),
                   thrust::make_counting_iterator((int) src.size()),
                   src.begin(),
                   out.begin());
    ASSERT_EQUAL(out, idx);

    // both map and source are counting iterators
    thrust::fill(out.begin(), out.end(), 0);
    thrust::gather(thrust::make_counting_iterator(0),
                   thrust::make_counting_iterator((int) out.size()),
                   thrust::make_counting_iterator(0),
                   out.begin());
    ASSERT_EQUAL(out, idx);
}
DECLARE_VECTOR_UNITTEST(TestGatherCountingIterator);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
|
2a9706a91d2c5794b78857dc1b32c634096aeb3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "cuda_SVD.cuh"
//! Accumulates the sum of squared differences between two ratings matrices.
//! Grid-stride loop over all num entries; only entries observed in dev_R0 (nonzero) contribute.
//! NOTE(review): despite the name, this computes only sum((R0-R1)^2) into *dev_sum via atomicAdd;
//! any RMS normalization (divide by count, sqrt) presumably happens in the caller -- confirm.
//! The caller must zero *dev_sum before launch.
__global__
void cudaFindRMSKernel(
float* dev_R0,
float* dev_R1,
float* dev_sum,
int num) {
unsigned int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_idx < num) {
// skip unobserved ratings (encoded as 0 in dev_R0)
if (dev_R0[thread_idx] != 0) {
atomicAdd(dev_sum, (dev_R0[thread_idx]-dev_R1[thread_idx])*(dev_R0[thread_idx]-dev_R1[thread_idx]));
}
// __syncthreads();
// printf("%f\n", dev_sum);
thread_idx += blockDim.x * gridDim.x;
}
}
//! Computes the reconstructed ratings matrix R1 = P x Q, one output element per thread
//! (grid-stride over the num_users*num_items entries, row-major indexing).
//! P is num_users x num_f, Q is num_f x num_items.
//! NOTE(review): the += accumulation assumes dev_R1 was zeroed by the caller -- confirm.
__global__
void cudaMultiplyKernel(
float* dev_P,
float* dev_Q,
float* dev_R1,
int num_users,
int num_items,
int num_f) {
unsigned int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_idx < num_users * num_items) {
// decompose the flat index into (user row, item column)
int row = thread_idx / num_items;
int col = thread_idx % num_items;
// dot product of row `row` of P with column `col` of Q
for (int i = 0; i < num_f; ++i) {
dev_R1[thread_idx] += dev_P[row * num_f + i] * dev_Q[i * num_items + col];
}
// if (thread_idx == 0) {
// printf("%f\n", dev_R1[thread_idx]);
// }
thread_idx += blockDim.x * gridDim.x;
}
}
//! One SGD update step for matrix-factorization training: each thread processes one
//! (user, item, rating) triple from dev_data (3 ints per sample, grid-stride over batch_size).
//! Computes the prediction error e = rate - P[user]·Q[:,item], then applies the regularized
//! gradient update to the user row of P and the item column of Q via atomicAdd.
//! NOTE(review): the reads of P/Q used to compute e and the updates are not synchronized with
//! concurrent atomic updates from other threads (lock-free, Hogwild-style SGD) -- presumably
//! intentional for this algorithm; confirm.
__global__
void cudaTrainingKernel(
int* dev_data,
float* dev_P,
float* dev_Q,
float step_size,
float regulation,
int num_users,
int num_items,
int num_f,
int batch_size) {
unsigned int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_idx < batch_size) {
// unpack the (user, item, rating) triple for this sample
int user = dev_data[3 * thread_idx];
int item = dev_data[3 * thread_idx + 1];
int rate = dev_data[3 * thread_idx + 2];
// prediction error: observed rating minus P[user]·Q[:,item]
float e = rate;
for (int i = 0; i < num_f; ++i) {
e -= dev_P[user * num_f + i] * dev_Q[i * num_items + item];
}
// printf("%f \n", e);
// regularized gradient step on both factor matrices
for (int i = 0; i < num_f; ++i) {
float update_row = step_size * (e * dev_Q[i * num_items + item] - regulation * dev_P[user * num_f + i]);
float update_col = step_size * (e * dev_P[user * num_f + i] - regulation * dev_Q[i * num_items + item]);
atomicAdd(&dev_P[user * num_f + i], update_row);
atomicAdd(&dev_Q[i * num_items + item], update_col);
}
thread_idx += blockDim.x * gridDim.x;
}
}
//! Host wrapper for cudaFindRMSKernel: allocates and zeroes a device accumulator, launches
//! the kernel over the num_users*num_items entries of R0/R1, and copies the sum of squared
//! differences back to the host.
//! \returns the accumulated sum (>= 0), or -1.0f if any HIP call fails (the sum of squares
//!          can never be negative, so -1 is a safe error sentinel; the original code already
//!          initialized the result to -1 but ignored every HIP error code).
float cudaCallFindRMSKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
float* dev_R0,
float* dev_R1,
int num_users,
int num_items) {
// device-side accumulator for the sum of squared errors
float *dev_sum = NULL;
if (hipMalloc((void**)&dev_sum, sizeof(float)) != hipSuccess) {
return -1.0f;
}
if (hipMemset(dev_sum, 0, sizeof(float)) != hipSuccess) {
hipFree(dev_sum);
return -1.0f;
}
hipLaunchKernelGGL(( cudaFindRMSKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
dev_R0,
dev_R1,
dev_sum,
num_users * num_items);
// blocking copy also synchronizes with the kernel launched above
float host_sum = -1.0f;
if (hipMemcpy(&host_sum, dev_sum, sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) {
host_sum = -1.0f;
}
hipFree(dev_sum);
return host_sum;
}
// Host wrapper: launches cudaMultiplyKernel to accumulate P * Q into dev_R1.
// Asynchronous — no synchronization or error check here; the caller must
// order any subsequent reads of dev_R1 (e.g. via a blocking copy or sync).
void cudaCallMultiplyKernel(const unsigned int blocks,
                            const unsigned int threadsPerBlock,
                            float* dev_P,
                            float* dev_Q,
                            float* dev_R1,
                            int num_users,
                            int num_items,
                            int num_f) {
  hipLaunchKernelGGL((cudaMultiplyKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
                     dev_P, dev_Q, dev_R1, num_users, num_items, num_f);
}
// Host wrapper: launches one SGD pass (cudaTrainingKernel) over a batch of
// (user, item, rating) triples stored in dev_data. Asynchronous — the caller
// is responsible for synchronizing before reading dev_P / dev_Q.
void cudaCallTrainingKernel(const unsigned int blocks,
                            const unsigned int threadsPerBlock,
                            int* dev_data,
                            float* dev_P,
                            float* dev_Q,
                            float step_size,
                            float regulation,
                            int num_users,
                            int num_items,
                            int num_f,
                            int batch_size) {
  hipLaunchKernelGGL((cudaTrainingKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
                     dev_data, dev_P, dev_Q, step_size, regulation,
                     num_users, num_items, num_f, batch_size);
}
| 2a9706a91d2c5794b78857dc1b32c634096aeb3f.cu | #include <cstdio>
#include "cuda_SVD.cuh"
// Accumulates sum((R0 - R1)^2) into *dev_sum over the num elements, skipping
// entries where dev_R0 == 0 (treated as unobserved ratings). *dev_sum must be
// zeroed before launch. Grid-stride loop: valid under any launch shape.
// All threads funnel into one atomicAdd target, which serializes under
// contention — acceptable for this diagnostic-style reduction.
__global__
void cudaFindRMSKernel(
    float* dev_R0,
    float* dev_R1,
    float* dev_sum,
    int num) {
  const unsigned int stride = blockDim.x * gridDim.x;
  for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
       idx < num;
       idx += stride) {
    if (dev_R0[idx] != 0) {
      const float diff = dev_R0[idx] - dev_R1[idx];
      atomicAdd(dev_sum, diff * diff);
    }
  }
}
// Dense reconstruction R1 = P * Q over the num_users x num_items rating grid.
// P is num_users x num_f and Q is num_f x num_items, both row-major.
// Launched 1-D with a grid-stride loop, so any <blocks, threads> shape is
// valid. Note dev_R1 is accumulated into (+=); it is expected to start
// zeroed (not verified here).
__global__
void cudaMultiplyKernel(
    float* dev_P,
    float* dev_Q,
    float* dev_R1,
    int num_users,
    int num_items,
    int num_f) {
  const unsigned int stride = blockDim.x * gridDim.x;
  for (unsigned int cell = blockIdx.x * blockDim.x + threadIdx.x;
       cell < num_users * num_items;
       cell += stride) {
    // Flat cell index -> (user row, item column).
    const int user = cell / num_items;
    const int item = cell % num_items;
    // Dot product of P's user row with Q's item column.
    for (int f = 0; f < num_f; ++f) {
      dev_R1[cell] += dev_P[user * num_f + f] * dev_Q[f * num_items + item];
    }
  }
}
// One SGD update pass for matrix factorization. dev_data packs batch_size
// samples as consecutive (user, item, rating) int triples. Each thread
// handles every (gridDim.x*blockDim.x)-th sample via a grid-stride loop.
// NOTE(review): P and Q are updated with atomicAdd, but the error term e is
// computed from values other threads may be mutating concurrently
// (lock-free / Hogwild-style updates — presumably intentional; confirm).
// num_users is currently unused by this kernel.
__global__
void cudaTrainingKernel(
int* dev_data,
float* dev_P,
float* dev_Q,
float step_size,
float regulation,
int num_users,
int num_items,
int num_f,
int batch_size) {
unsigned int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_idx < batch_size) {
int user = dev_data[3 * thread_idx];
int item = dev_data[3 * thread_idx + 1];
int rate = dev_data[3 * thread_idx + 2];
// Prediction error: e = rating - dot(P[user, :], Q[:, item]).
float e = rate;
for (int i = 0; i < num_f; ++i) {
e -= dev_P[user * num_f + i] * dev_Q[i * num_items + item];
}
// printf("%f \n", e);
// Regularized gradient step. Both deltas are computed from the current
// values of P and Q *before* either atomicAdd is applied, so the pair of
// updates for factor i uses a consistent snapshot within this thread.
for (int i = 0; i < num_f; ++i) {
float update_row = step_size * (e * dev_Q[i * num_items + item] - regulation * dev_P[user * num_f + i]);
float update_col = step_size * (e * dev_P[user * num_f + i] - regulation * dev_Q[i * num_items + item]);
atomicAdd(&dev_P[user * num_f + i], update_row);
atomicAdd(&dev_Q[i * num_items + item], update_col);
}
thread_idx += blockDim.x * gridDim.x;
}
}
// Host wrapper: returns the sum of squared differences between the observed
// ratings dev_R0 and the reconstruction dev_R1 (the kernel skips entries
// where dev_R0 == 0). A single-float accumulator is allocated on the device,
// zeroed, filled by cudaFindRMSKernel, copied back, and freed. The blocking
// cudaMemcpy doubles as the synchronization point for the kernel launch.
float cudaCallFindRMSKernel(const unsigned int blocks,
                            const unsigned int threadsPerBlock,
                            float* dev_R0,
                            float* dev_R1,
                            int num_users,
                            int num_items) {
  float* dev_sum = nullptr;
  cudaMalloc((void**)&dev_sum, sizeof(float));
  cudaMemset(dev_sum, 0, sizeof(float));

  cudaFindRMSKernel<<<blocks, threadsPerBlock>>>(dev_R0, dev_R1, dev_sum,
                                                 num_users * num_items);

  float host_sum = -1;  // sentinel: stays -1 if the copy back fails
  cudaMemcpy(&host_sum, dev_sum, sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(dev_sum);
  return host_sum;
}
// Host wrapper: launches cudaMultiplyKernel to accumulate P * Q into dev_R1.
// Asynchronous — no synchronization or error check here; the caller must
// order any subsequent reads of dev_R1 (e.g. via a blocking copy or sync).
void cudaCallMultiplyKernel(const unsigned int blocks,
                            const unsigned int threadsPerBlock,
                            float* dev_P,
                            float* dev_Q,
                            float* dev_R1,
                            int num_users,
                            int num_items,
                            int num_f) {
  cudaMultiplyKernel<<<blocks, threadsPerBlock>>>(dev_P, dev_Q, dev_R1,
                                                  num_users, num_items, num_f);
}
// Host wrapper: launches one SGD pass (cudaTrainingKernel) over a batch of
// (user, item, rating) triples stored in dev_data. Asynchronous — the caller
// is responsible for synchronizing before reading dev_P / dev_Q.
void cudaCallTrainingKernel(const unsigned int blocks,
                            const unsigned int threadsPerBlock,
                            int* dev_data,
                            float* dev_P,
                            float* dev_Q,
                            float step_size,
                            float regulation,
                            int num_users,
                            int num_items,
                            int num_f,
                            int batch_size) {
  cudaTrainingKernel<<<blocks, threadsPerBlock>>>(dev_data, dev_P, dev_Q,
                                                  step_size, regulation,
                                                  num_users, num_items,
                                                  num_f, batch_size);
}
|
1c45ef8919cb0278dcd94fe328d0a0b33de52331.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "headers.h"
/*
 * Spam-classifier driver: trains a linear SVM (svmTrain) on feature vectors
 * read from text files, then reports prediction accuracy on the training
 * set, a held-out test set, and a single sample email. GPU work inside
 * svmTrain/svmPredict is timed with HIP events.
 *
 * Fixes relative to the original:
 *  - the NULL checks after the testMatrix and Xtest allocations tested the
 *    wrong pointers (trainingMatrix and X respectively — copy/paste bugs);
 *  - the timing events are destroyed before exit.
 */
int main(int argc, char **argv)
{
    /* input file names */
    char trainingVectorFilename[] = "y_vals.txt";
    char trainingSetFilename[] = "X_vals.txt";
    char testSetFilename[] = "testSet.txt";
    char testResultVectorFilename[] = "ytest.txt";
    char sampleEmailFilename[] = "emailVector.txt";
    /* problem sizes and SVM hyper-parameters */
    int const numFeatures = FEATURE_VECTOR_SIZE;
    int const numTrainingExamples = TRAINING_SET_SIZE;
    int const numTestExamples = TEST_SET_SIZE;
    floatType_t const tol = 1.0e-3;
    floatType_t const C = 0.1;
    char spam[] = "SPAM";
    char notSpam[] = "NOT SPAM";
    /* host arrays */
    int *trainingVector, *trainingMatrix, *pred;
    int *testVector, *testMatrix;
    floatType_t *X, *Y, *W, *Xtest;
    /* read the training labels */
    trainingVector = (int *) malloc( sizeof(int) * numTrainingExamples );
    if( trainingVector == NULL )
        fprintf(stderr,"Houston we have a problem\n");
    readMatrixFromFile( trainingVectorFilename, trainingVector,
                        numTrainingExamples, 1 );
    /* labels as floats; map 0 -> -1 for training */
    Y = (floatType_t *) malloc( sizeof(floatType_t) * numTrainingExamples );
    if( Y == NULL )
        fprintf(stderr,"error malloc y\n");
    for( int i = 0; i < numTrainingExamples; i++ )
    {
        Y[i] = (floatType_t) trainingVector[i];
        if( Y[i] == 0.0 ) Y[i] = -1.0;
    } /* end for */
    /* training matrix: one example per row */
    trainingMatrix = (int *) malloc( sizeof(int) * numTrainingExamples *
                                     numFeatures );
    if( trainingMatrix == NULL )
        fprintf(stderr,"Houston more problems\n");
    readMatrixFromFile( trainingSetFilename, trainingMatrix,
                        numTrainingExamples, numFeatures );
    /* copy trainingMatrix into X as floats */
    X = (floatType_t *) malloc( sizeof(floatType_t) * numTrainingExamples *
                                numFeatures );
    if( X == NULL )
        fprintf(stderr,"error malloc X\n");
    for( int i = 0; i < numTrainingExamples * numFeatures; i++ )
        X[i] = (floatType_t) trainingMatrix[i];
    /* weight vector produced by training */
    W = (floatType_t *) malloc( sizeof(floatType_t) * numFeatures );
    if( W == NULL ) fprintf(stderr,"error malloc yW\n");
    /* setup timers */
    hipEvent_t start, stop;
    CUDA_CALL( hipEventCreate( &start ) );
    CUDA_CALL( hipEventCreate( &stop ) );
    CUDA_CALL( hipEventRecord( start, 0 ) );
    /* train the classifier */
    svmTrain(X, Y, C,
             numFeatures, numTrainingExamples,
             tol, W );
    CUDA_CALL( hipEventRecord( stop, 0 ) );
    CUDA_CALL( hipEventSynchronize( stop ) );
    float elapsedTime;
    CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) );
    fprintf(stdout, "Total time for svmTrain is %f sec\n",elapsedTime/1000.0f );
    /* predictions on the training set.
       NOTE(review): pred is sized for the training set and reused below for
       the test set; this assumes TEST_SET_SIZE <= TRAINING_SET_SIZE. */
    pred = (int *) malloc( sizeof(int) * numTrainingExamples );
    if( pred == NULL ) fprintf(stderr,"problem with malloc p in main\n");
    CUDA_CALL( hipEventRecord( start, 0 ) );
    svmPredict( X, W, numTrainingExamples, numFeatures, pred );
    CUDA_CALL( hipEventRecord( stop, 0 ) );
    CUDA_CALL( hipEventSynchronize( stop ) );
    CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) );
    fprintf(stdout, "Total time for svmPredict is %f sec\n",elapsedTime/1000.0f );
    /* training-set accuracy */
    double mean = 0.0;
    for( int i = 0; i < numTrainingExamples; i++ )
    {
        mean += (pred[i] == trainingVector[i]) ? 1.0 : 0.0;
    } /* end for */
    mean /= (double) numTrainingExamples;
    printf("Prediction success rate on training set is %f\n",mean*100.0);
    /* test labels */
    testVector = (int *) malloc( sizeof(int) * numTestExamples );
    if( testVector == NULL )
        fprintf(stderr,"Houston we have a problem\n");
    readMatrixFromFile( testResultVectorFilename, testVector,
                        numTestExamples, 1 );
    /* test matrix: one example per row */
    testMatrix = (int *) malloc( sizeof(int) * numTestExamples *
                                 numFeatures );
    if( testMatrix == NULL )   /* FIX: was checking trainingMatrix */
        fprintf(stderr,"Houston more problems\n");
    readMatrixFromFile( testSetFilename, testMatrix,
                        numTestExamples, numFeatures );
    /* copy the testMatrix into Xtest as floats */
    Xtest = (floatType_t *) malloc( sizeof(floatType_t) * numTestExamples *
                                    numFeatures );
    if( Xtest == NULL )        /* FIX: was checking X */
        fprintf(stderr,"error malloc X\n");
    for( int i = 0; i < numTestExamples * numFeatures; i++ )
        Xtest[i] = (floatType_t) testMatrix[i];
    /* test-set accuracy with the trained classifier */
    svmPredict( Xtest, W, numTestExamples, numFeatures, pred );
    mean = 0.0;
    for( int i = 0; i < numTestExamples; i++ )
    {
        mean += (pred[i] == testVector[i]) ? 1.0 : 0.0;
    } /* end for */
    mean /= (double) numTestExamples;
    printf("Prediction success rate on test set is %f\n",mean*100.0);
    /* classify one sample email with the trained classifier */
    readMatrixFromFile( sampleEmailFilename, testMatrix,
                        1, numFeatures );
    for( int i = 0; i < numFeatures; i++ )
    {
        Xtest[i] = (floatType_t) testMatrix[i];
    }
    svmPredict( Xtest, W, 1, numFeatures, pred );
    printf("Email test results 1 is SPAM 0 is NOT SPAM\n");
    printf("File Name %s, classification %d %s\n",
           sampleEmailFilename, pred[0], pred[0]==1 ? spam : notSpam);
    /* cleanup */
    CUDA_CALL( hipEventDestroy( start ) );
    CUDA_CALL( hipEventDestroy( stop ) );
    free(testVector);
    free(testMatrix);
    free(pred);
    free(W);
    free(Y);
    free(X);
    free(Xtest);
    free(trainingVector);
    free(trainingMatrix);
    return 0;
} /* end main */
| 1c45ef8919cb0278dcd94fe328d0a0b33de52331.cu | /*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "headers.h"
/*
 * Spam-classifier driver: trains a linear SVM (svmTrain) on feature vectors
 * read from text files, then reports prediction accuracy on the training
 * set, a held-out test set, and a single sample email. GPU work inside
 * svmTrain/svmPredict is timed with CUDA events.
 *
 * Fixes relative to the original:
 *  - the NULL checks after the testMatrix and Xtest allocations tested the
 *    wrong pointers (trainingMatrix and X respectively — copy/paste bugs);
 *  - the timing events are destroyed before exit.
 */
int main(int argc, char **argv)
{
    /* input file names */
    char trainingVectorFilename[] = "y_vals.txt";
    char trainingSetFilename[] = "X_vals.txt";
    char testSetFilename[] = "testSet.txt";
    char testResultVectorFilename[] = "ytest.txt";
    char sampleEmailFilename[] = "emailVector.txt";
    /* problem sizes and SVM hyper-parameters */
    int const numFeatures = FEATURE_VECTOR_SIZE;
    int const numTrainingExamples = TRAINING_SET_SIZE;
    int const numTestExamples = TEST_SET_SIZE;
    floatType_t const tol = 1.0e-3;
    floatType_t const C = 0.1;
    char spam[] = "SPAM";
    char notSpam[] = "NOT SPAM";
    /* host arrays */
    int *trainingVector, *trainingMatrix, *pred;
    int *testVector, *testMatrix;
    floatType_t *X, *Y, *W, *Xtest;
    /* read the training labels */
    trainingVector = (int *) malloc( sizeof(int) * numTrainingExamples );
    if( trainingVector == NULL )
        fprintf(stderr,"Houston we have a problem\n");
    readMatrixFromFile( trainingVectorFilename, trainingVector,
                        numTrainingExamples, 1 );
    /* labels as floats; map 0 -> -1 for training */
    Y = (floatType_t *) malloc( sizeof(floatType_t) * numTrainingExamples );
    if( Y == NULL )
        fprintf(stderr,"error malloc y\n");
    for( int i = 0; i < numTrainingExamples; i++ )
    {
        Y[i] = (floatType_t) trainingVector[i];
        if( Y[i] == 0.0 ) Y[i] = -1.0;
    } /* end for */
    /* training matrix: one example per row */
    trainingMatrix = (int *) malloc( sizeof(int) * numTrainingExamples *
                                     numFeatures );
    if( trainingMatrix == NULL )
        fprintf(stderr,"Houston more problems\n");
    readMatrixFromFile( trainingSetFilename, trainingMatrix,
                        numTrainingExamples, numFeatures );
    /* copy trainingMatrix into X as floats */
    X = (floatType_t *) malloc( sizeof(floatType_t) * numTrainingExamples *
                                numFeatures );
    if( X == NULL )
        fprintf(stderr,"error malloc X\n");
    for( int i = 0; i < numTrainingExamples * numFeatures; i++ )
        X[i] = (floatType_t) trainingMatrix[i];
    /* weight vector produced by training */
    W = (floatType_t *) malloc( sizeof(floatType_t) * numFeatures );
    if( W == NULL ) fprintf(stderr,"error malloc yW\n");
    /* setup timers */
    cudaEvent_t start, stop;
    CUDA_CALL( cudaEventCreate( &start ) );
    CUDA_CALL( cudaEventCreate( &stop ) );
    CUDA_CALL( cudaEventRecord( start, 0 ) );
    /* train the classifier */
    svmTrain(X, Y, C,
             numFeatures, numTrainingExamples,
             tol, W );
    CUDA_CALL( cudaEventRecord( stop, 0 ) );
    CUDA_CALL( cudaEventSynchronize( stop ) );
    float elapsedTime;
    CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) );
    fprintf(stdout, "Total time for svmTrain is %f sec\n",elapsedTime/1000.0f );
    /* predictions on the training set.
       NOTE(review): pred is sized for the training set and reused below for
       the test set; this assumes TEST_SET_SIZE <= TRAINING_SET_SIZE. */
    pred = (int *) malloc( sizeof(int) * numTrainingExamples );
    if( pred == NULL ) fprintf(stderr,"problem with malloc p in main\n");
    CUDA_CALL( cudaEventRecord( start, 0 ) );
    svmPredict( X, W, numTrainingExamples, numFeatures, pred );
    CUDA_CALL( cudaEventRecord( stop, 0 ) );
    CUDA_CALL( cudaEventSynchronize( stop ) );
    CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) );
    fprintf(stdout, "Total time for svmPredict is %f sec\n",elapsedTime/1000.0f );
    /* training-set accuracy */
    double mean = 0.0;
    for( int i = 0; i < numTrainingExamples; i++ )
    {
        mean += (pred[i] == trainingVector[i]) ? 1.0 : 0.0;
    } /* end for */
    mean /= (double) numTrainingExamples;
    printf("Prediction success rate on training set is %f\n",mean*100.0);
    /* test labels */
    testVector = (int *) malloc( sizeof(int) * numTestExamples );
    if( testVector == NULL )
        fprintf(stderr,"Houston we have a problem\n");
    readMatrixFromFile( testResultVectorFilename, testVector,
                        numTestExamples, 1 );
    /* test matrix: one example per row */
    testMatrix = (int *) malloc( sizeof(int) * numTestExamples *
                                 numFeatures );
    if( testMatrix == NULL )   /* FIX: was checking trainingMatrix */
        fprintf(stderr,"Houston more problems\n");
    readMatrixFromFile( testSetFilename, testMatrix,
                        numTestExamples, numFeatures );
    /* copy the testMatrix into Xtest as floats */
    Xtest = (floatType_t *) malloc( sizeof(floatType_t) * numTestExamples *
                                    numFeatures );
    if( Xtest == NULL )        /* FIX: was checking X */
        fprintf(stderr,"error malloc X\n");
    for( int i = 0; i < numTestExamples * numFeatures; i++ )
        Xtest[i] = (floatType_t) testMatrix[i];
    /* test-set accuracy with the trained classifier */
    svmPredict( Xtest, W, numTestExamples, numFeatures, pred );
    mean = 0.0;
    for( int i = 0; i < numTestExamples; i++ )
    {
        mean += (pred[i] == testVector[i]) ? 1.0 : 0.0;
    } /* end for */
    mean /= (double) numTestExamples;
    printf("Prediction success rate on test set is %f\n",mean*100.0);
    /* classify one sample email with the trained classifier */
    readMatrixFromFile( sampleEmailFilename, testMatrix,
                        1, numFeatures );
    for( int i = 0; i < numFeatures; i++ )
    {
        Xtest[i] = (floatType_t) testMatrix[i];
    }
    svmPredict( Xtest, W, 1, numFeatures, pred );
    printf("Email test results 1 is SPAM 0 is NOT SPAM\n");
    printf("File Name %s, classification %d %s\n",
           sampleEmailFilename, pred[0], pred[0]==1 ? spam : notSpam);
    /* cleanup */
    CUDA_CALL( cudaEventDestroy( start ) );
    CUDA_CALL( cudaEventDestroy( stop ) );
    free(testVector);
    free(testMatrix);
    free(pred);
    free(W);
    free(Y);
    free(X);
    free(Xtest);
    free(trainingVector);
    free(trainingMatrix);
    return 0;
} /* end main */
|
276823eb2c1495bea3b8d7035f1136a412b28061.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0
#include <G4NistManager.hh>
#include <G4Material.hh>
#include <G4Box.hh>
#include <G4LogicalVolume.hh>
#include <G4PVPlacement.hh>
#include <G4ParticleTable.hh>
#include <G4Electron.hh>
#include <G4Positron.hh>
#include <G4Gamma.hh>
#include <G4Proton.hh>
#include <G4ProductionCuts.hh>
#include <G4Region.hh>
#include <G4ProductionCutsTable.hh>
#include <G4UnitsTable.hh>
#include <G4SystemOfUnits.hh>
#include <G4HepEmData.hh>
#include <G4HepEmElectronInit.hh>
#include <G4HepEmMatCutData.hh>
#include <G4HepEmMaterialInit.hh>
#include <G4HepEmParameters.hh>
#include <G4HepEmParametersInit.hh>
#include <G4HepEmElectronManager.hh>
#include <G4HepEmElectronTrack.hh>
#include <G4HepEmElectronInteractionBrem.hh>
#include <G4HepEmElectronInteractionIoni.hh>
#include <G4HepEmPositronInteractionAnnihilation.hh>
// Pull in implementation.
#include <G4HepEmRunUtils.icc>
#include <G4HepEmInteractionUtils.icc>
#include <G4HepEmElectronManager.icc>
#include <G4HepEmElectronInteractionBrem.icc>
#include <G4HepEmElectronInteractionIoni.icc>
#include <G4HepEmPositronInteractionAnnihilation.icc>
#include <CopCore/Global.h>
#include <CopCore/Ranluxpp.h>
// Builds the minimal Geant4 state that G4HepEm initialization reads later:
// a vacuum world containing a silicon box, the standard EM particles plus
// the proton, a 1 mm secondary-production cut attached to a default region,
// and an updated material-cuts couple table. Call order matters: the couple
// table update must come last, after geometry, particles and cuts exist.
static void InitGeant4()
{
// --- Create materials.
G4Material *galactic = G4NistManager::Instance()->FindOrBuildMaterial("G4_Galactic");
G4Material *silicon = G4NistManager::Instance()->FindOrBuildMaterial("G4_Si");
//
// --- Define a world.
G4double worldDim = 1 * m;
G4Box *worldBox = new G4Box("world", worldDim, worldDim, worldDim);
G4LogicalVolume *worldLog = new G4LogicalVolume(worldBox, galactic, "world");
G4PVPlacement *world = new G4PVPlacement(nullptr, {}, worldLog, "world", nullptr, false, 0);
// --- Define a box.
G4double boxDim = 0.5 * m;
G4double boxPos = 0.5 * boxDim;
G4Box *siliconBox = new G4Box("silicon", boxDim, boxDim, boxDim);
G4LogicalVolume *siliconLog = new G4LogicalVolume(siliconBox, silicon, "silicon");
new G4PVPlacement(nullptr, {boxPos, boxPos, boxPos}, siliconLog, "silicon", worldLog, false, 0);
//
// --- Create particles that have secondary production threshold.
G4Gamma::Gamma();
G4Electron::Electron();
G4Positron::Positron();
G4Proton::Proton();
G4ParticleTable *partTable = G4ParticleTable::GetParticleTable();
partTable->SetReadiness();
//
// --- Create production - cuts object and set the secondary production threshold.
G4ProductionCuts *productionCuts = new G4ProductionCuts();
constexpr G4double ProductionCut = 1 * mm;
productionCuts->SetProductionCut(ProductionCut);
//
// --- Register a region for the world.
G4Region *reg = new G4Region("default");
reg->AddRootLogicalVolume(worldLog);
reg->UsedInMassGeometry(true);
reg->SetProductionCuts(productionCuts);
//
// --- Update the couple tables.
G4ProductionCutsTable *theCoupleTable = G4ProductionCutsTable::GetProductionCutsTable();
theCoupleTable->UpdateCoupleTable(world);
}
__constant__ __device__ struct G4HepEmParameters g4HepEmPars;
__constant__ __device__ struct G4HepEmData g4HepEmData;
// Bundles the host-side G4HepEm physics tables and parameters so they can be
// initialized (InitG4HepEm) and released (FreeG4HepEm) as one unit.
struct G4HepEmState {
G4HepEmData data;           // material / element / electron / positron tables
G4HepEmParameters parameters; // run parameters (e.g. brem model limits)
};
// Initializes G4HepEm host data from the Geant4 state built by InitGeant4(),
// uploads it to the device, and fills the __constant__ symbols g4HepEmPars /
// g4HepEmData. The device-side G4HepEmData copy carries *device* pointers in
// its primary fields, while the *_gpu mirror fields are nulled so device code
// can never dereference a host pointer by mistake. The caller owns the
// returned state and must release it with FreeG4HepEm().
static G4HepEmState *InitG4HepEm()
{
G4HepEmState *state = new G4HepEmState;
InitG4HepEmData(&state->data);
InitHepEmParameters(&state->parameters);
InitMaterialAndCoupleData(&state->data, &state->parameters);
// Two passes: the bool presumably selects e- vs e+ tables — confirm
// against the G4HepEm InitElectronData documentation.
InitElectronData(&state->data, &state->parameters, true);
InitElectronData(&state->data, &state->parameters, false);
G4HepEmMatCutData *cutData = state->data.fTheMatCutData;
G4cout << "fNumG4MatCuts = " << cutData->fNumG4MatCuts << ", fNumMatCutData = " << cutData->fNumMatCutData << G4endl;
// Copy to GPU.
CopyG4HepEmDataToGPU(&state->data);
COPCORE_CUDA_CHECK(hipMemcpyToSymbol(g4HepEmPars, &state->parameters, sizeof(G4HepEmParameters)));
// Create G4HepEmData with the device pointers.
G4HepEmData dataOnDevice;
dataOnDevice.fTheMatCutData = state->data.fTheMatCutData_gpu;
dataOnDevice.fTheMaterialData = state->data.fTheMaterialData_gpu;
dataOnDevice.fTheElementData = state->data.fTheElementData_gpu;
dataOnDevice.fTheElectronData = state->data.fTheElectronData_gpu;
dataOnDevice.fThePositronData = state->data.fThePositronData_gpu;
dataOnDevice.fTheSBTableData = state->data.fTheSBTableData_gpu;
// The other pointers should never be used.
dataOnDevice.fTheMatCutData_gpu = nullptr;
dataOnDevice.fTheMaterialData_gpu = nullptr;
dataOnDevice.fTheElementData_gpu = nullptr;
dataOnDevice.fTheElectronData_gpu = nullptr;
dataOnDevice.fThePositronData_gpu = nullptr;
dataOnDevice.fTheSBTableData_gpu = nullptr;
COPCORE_CUDA_CHECK(hipMemcpyToSymbol(g4HepEmData, &dataOnDevice, sizeof(G4HepEmData)));
return state;
}
// Releases the G4HepEm tables via FreeG4HepEmData (presumably including the
// GPU copies made by CopyG4HepEmDataToGPU — confirm in G4HepEm docs), then
// deletes the state object itself. Order matters: free the data first.
static void FreeG4HepEm(G4HepEmState *state)
{
FreeG4HepEmData(&state->data);
delete state;
}
// Adapts a RanluxppDouble generator to the G4HepEmRandomEngine interface via
// two static trampoline functions, usable on both host and device. The
// engine stores only a type-erased pointer to the RanluxppDouble, so the
// generator must outlive this wrapper.
class RanluxppDoubleEngine : public G4HepEmRandomEngine {
// Wrapper functions to call into CLHEP::HepRandomEngine.
static __host__ __device__ double flatWrapper(void *object) {
return ((RanluxppDouble*)object)->Rndm();
}
static __host__ __device__ void flatArrayWrapper(void *object, const int size, double* vect) {
for (int i = 0; i < size; i++) {
vect[i] = ((RanluxppDouble*)object)->Rndm();
}
}
public:
__host__ __device__
RanluxppDoubleEngine(RanluxppDouble* engine)
: G4HepEmRandomEngine(/*object=*/engine, &flatWrapper, &flatArrayWrapper) {}
};
// Demo kernel (intended for a <<<1,1>>> launch): transports a single 100 GeV
// electron for at most 200 steps with the G4HepEm electron manager. Each step
// samples the number-of-interactions-left, limits the step (HowFar), applies
// continuous energy loss, and performs the winning discrete interaction
// (ioni / brem / annihilation), printing diagnostics throughout.
// NOTE(review): no secondaries are stacked — energy transfers are simply
// deducted from the primary; positron branches are stubbed out.
__global__ void TransportParticle()
{
RanluxppDouble r;
RanluxppDoubleEngine rnge(&r);
// Init a track.
G4HepEmElectronTrack elTrack;
// To simplify copy&paste...
G4HepEmElectronTrack *theElTrack = &elTrack;
G4HepEmTrack *theTrack = elTrack.GetTrack();
theTrack->SetEKin(100 * GeV);
theTrack->SetMCIndex(1);
const bool isElectron = true;
printf("Starting with %fMeV\n", theTrack->GetEKin());
for (int i = 0; i < 200; i++) {
printf("-----------------------------------------\n");
// Sample the `number-of-interaction-left`.
for (int ip = 0; ip < 3; ++ip) {
if (theTrack->GetNumIALeft(ip) <= 0.) {
theTrack->SetNumIALeft(-::log(r.Rndm()), ip);
}
}
G4HepEmElectronManager::HowFar(&g4HepEmData, &g4HepEmPars, theElTrack);
printf("sampled process: %d, particle travels %fmm\n", theTrack->GetWinnerProcessIndex(),
theTrack->GetGStepLength());
const int iDProc = theTrack->GetWinnerProcessIndex();
bool stopped = G4HepEmElectronManager::PerformContinuous(&g4HepEmData, &g4HepEmPars, theElTrack);
printf("energy after continuous process: %fMeV\n", theTrack->GetEKin());
if (stopped) {
// call annihilation for e+ !!!
if (!isElectron) {
// FIXME !!!
// PerformPositronAnnihilation(tlData, true);
}
return;
} else if (iDProc < 0) {
// No discrete process or on boundary.
continue;
}
// Reset number of interaction left for the winner discrete process.
// (Will be resampled in the next iteration.)
theTrack->SetNumIALeft(-1.0, iDProc);
// Check if a delta interaction happens instead of the real discrete process.
if (G4HepEmElectronManager::CheckDelta(&g4HepEmData, theTrack, r.Rndm())) {
printf("delta interaction happened!\n");
continue;
}
// TODO: Perform the discrete part of the winner interaction.
const int theMCIndx = theTrack->GetMCIndex();
const double theEkin = theTrack->GetEKin();
const double theLogEkin = theTrack->GetLogEKin();
const double theElCut = g4HepEmData.fTheMatCutData->fMatCutData[theMCIndx].fSecElProdCutE;
switch (iDProc) {
case 0: {
// invoke ioni (for e-/e+):
// PerformElectronIoni(tlData, hepEmData, isElectron);
const double deltaEkin = (isElectron)
? G4HepEmElectronInteractionIoni::SampleETransferMoller(theElCut, theEkin, &rnge)
: G4HepEmElectronInteractionIoni::SampleETransferBhabha(theElCut, theEkin, &rnge);
theTrack->SetEKin(theEkin - deltaEkin);
break;
}
case 1: // invoke brem (for e-/e+): either SB- or Rel-Brem
if (theEkin < g4HepEmPars.fElectronBremModelLim) {
// PerformElectronBremSB(tlData, hepEmData, isElectron);
double deltaEkin = G4HepEmElectronInteractionBrem::SampleETransferSB(&g4HepEmData, theEkin, theLogEkin,
theMCIndx, &rnge, isElectron);
theTrack->SetEKin(theEkin - deltaEkin);
} else {
// PerformElectronBremRB(tlData, hepEmData);
double deltaEkin = G4HepEmElectronInteractionBrem::SampleETransferRB(&g4HepEmData, theEkin, theLogEkin,
theMCIndx, &rnge, isElectron);
theTrack->SetEKin(theEkin - deltaEkin);
}
break;
case 2: // invoke annihilation (in-flight) for e+
// PerformPositronAnnihilation(tlData, false);
break;
}
printf("energy after discrete process: %fMeV\n", theTrack->GetEKin());
}
}
// Entry point: builds the Geant4 + G4HepEm state on the host, runs the
// single-track transport demo on the GPU, then releases the state.
int main()
{
InitGeant4();
G4HepEmState *state = InitG4HepEm();
printf("Launching particle transport on GPU\n");
printf("-----------------------------------------\n");
hipLaunchKernelGGL(( TransportParticle), dim3(1), dim3(1), 0, 0, );
// Blocks until the kernel finishes and surfaces any launch/execution error.
COPCORE_CUDA_CHECK(hipDeviceSynchronize());
FreeG4HepEm(state);
return 0;
}
| 276823eb2c1495bea3b8d7035f1136a412b28061.cu | // SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0
#include <G4NistManager.hh>
#include <G4Material.hh>
#include <G4Box.hh>
#include <G4LogicalVolume.hh>
#include <G4PVPlacement.hh>
#include <G4ParticleTable.hh>
#include <G4Electron.hh>
#include <G4Positron.hh>
#include <G4Gamma.hh>
#include <G4Proton.hh>
#include <G4ProductionCuts.hh>
#include <G4Region.hh>
#include <G4ProductionCutsTable.hh>
#include <G4UnitsTable.hh>
#include <G4SystemOfUnits.hh>
#include <G4HepEmData.hh>
#include <G4HepEmElectronInit.hh>
#include <G4HepEmMatCutData.hh>
#include <G4HepEmMaterialInit.hh>
#include <G4HepEmParameters.hh>
#include <G4HepEmParametersInit.hh>
#include <G4HepEmElectronManager.hh>
#include <G4HepEmElectronTrack.hh>
#include <G4HepEmElectronInteractionBrem.hh>
#include <G4HepEmElectronInteractionIoni.hh>
#include <G4HepEmPositronInteractionAnnihilation.hh>
// Pull in implementation.
#include <G4HepEmRunUtils.icc>
#include <G4HepEmInteractionUtils.icc>
#include <G4HepEmElectronManager.icc>
#include <G4HepEmElectronInteractionBrem.icc>
#include <G4HepEmElectronInteractionIoni.icc>
#include <G4HepEmPositronInteractionAnnihilation.icc>
#include <CopCore/Global.h>
#include <CopCore/Ranluxpp.h>
// Builds the minimal Geant4 state that G4HepEm initialization reads later:
// a vacuum world containing a silicon box, the standard EM particles plus
// the proton, a 1 mm secondary-production cut attached to a default region,
// and an updated material-cuts couple table. Call order matters: the couple
// table update must come last, after geometry, particles and cuts exist.
static void InitGeant4()
{
// --- Create materials.
G4Material *galactic = G4NistManager::Instance()->FindOrBuildMaterial("G4_Galactic");
G4Material *silicon = G4NistManager::Instance()->FindOrBuildMaterial("G4_Si");
//
// --- Define a world.
G4double worldDim = 1 * m;
G4Box *worldBox = new G4Box("world", worldDim, worldDim, worldDim);
G4LogicalVolume *worldLog = new G4LogicalVolume(worldBox, galactic, "world");
G4PVPlacement *world = new G4PVPlacement(nullptr, {}, worldLog, "world", nullptr, false, 0);
// --- Define a box.
G4double boxDim = 0.5 * m;
G4double boxPos = 0.5 * boxDim;
G4Box *siliconBox = new G4Box("silicon", boxDim, boxDim, boxDim);
G4LogicalVolume *siliconLog = new G4LogicalVolume(siliconBox, silicon, "silicon");
new G4PVPlacement(nullptr, {boxPos, boxPos, boxPos}, siliconLog, "silicon", worldLog, false, 0);
//
// --- Create particles that have secondary production threshold.
G4Gamma::Gamma();
G4Electron::Electron();
G4Positron::Positron();
G4Proton::Proton();
G4ParticleTable *partTable = G4ParticleTable::GetParticleTable();
partTable->SetReadiness();
//
// --- Create production - cuts object and set the secondary production threshold.
G4ProductionCuts *productionCuts = new G4ProductionCuts();
constexpr G4double ProductionCut = 1 * mm;
productionCuts->SetProductionCut(ProductionCut);
//
// --- Register a region for the world.
G4Region *reg = new G4Region("default");
reg->AddRootLogicalVolume(worldLog);
reg->UsedInMassGeometry(true);
reg->SetProductionCuts(productionCuts);
//
// --- Update the couple tables.
G4ProductionCutsTable *theCoupleTable = G4ProductionCutsTable::GetProductionCutsTable();
theCoupleTable->UpdateCoupleTable(world);
}
__constant__ __device__ struct G4HepEmParameters g4HepEmPars;
__constant__ __device__ struct G4HepEmData g4HepEmData;
// Bundles the host-side G4HepEm physics tables and parameters so they can be
// initialized (InitG4HepEm) and released (FreeG4HepEm) as one unit.
struct G4HepEmState {
G4HepEmData data;           // material / element / electron / positron tables
G4HepEmParameters parameters; // run parameters (e.g. brem model limits)
};
// Initializes G4HepEm host data from the Geant4 state built by InitGeant4(),
// uploads it to the device, and fills the __constant__ symbols g4HepEmPars /
// g4HepEmData. The device-side G4HepEmData copy carries *device* pointers in
// its primary fields, while the *_gpu mirror fields are nulled so device code
// can never dereference a host pointer by mistake. The caller owns the
// returned state and must release it with FreeG4HepEm().
static G4HepEmState *InitG4HepEm()
{
G4HepEmState *state = new G4HepEmState;
InitG4HepEmData(&state->data);
InitHepEmParameters(&state->parameters);
InitMaterialAndCoupleData(&state->data, &state->parameters);
// Two passes: the bool presumably selects e- vs e+ tables — confirm
// against the G4HepEm InitElectronData documentation.
InitElectronData(&state->data, &state->parameters, true);
InitElectronData(&state->data, &state->parameters, false);
G4HepEmMatCutData *cutData = state->data.fTheMatCutData;
G4cout << "fNumG4MatCuts = " << cutData->fNumG4MatCuts << ", fNumMatCutData = " << cutData->fNumMatCutData << G4endl;
// Copy to GPU.
CopyG4HepEmDataToGPU(&state->data);
COPCORE_CUDA_CHECK(cudaMemcpyToSymbol(g4HepEmPars, &state->parameters, sizeof(G4HepEmParameters)));
// Create G4HepEmData with the device pointers.
G4HepEmData dataOnDevice;
dataOnDevice.fTheMatCutData = state->data.fTheMatCutData_gpu;
dataOnDevice.fTheMaterialData = state->data.fTheMaterialData_gpu;
dataOnDevice.fTheElementData = state->data.fTheElementData_gpu;
dataOnDevice.fTheElectronData = state->data.fTheElectronData_gpu;
dataOnDevice.fThePositronData = state->data.fThePositronData_gpu;
dataOnDevice.fTheSBTableData = state->data.fTheSBTableData_gpu;
// The other pointers should never be used.
dataOnDevice.fTheMatCutData_gpu = nullptr;
dataOnDevice.fTheMaterialData_gpu = nullptr;
dataOnDevice.fTheElementData_gpu = nullptr;
dataOnDevice.fTheElectronData_gpu = nullptr;
dataOnDevice.fThePositronData_gpu = nullptr;
dataOnDevice.fTheSBTableData_gpu = nullptr;
COPCORE_CUDA_CHECK(cudaMemcpyToSymbol(g4HepEmData, &dataOnDevice, sizeof(G4HepEmData)));
return state;
}
// Releases the G4HepEm tables via FreeG4HepEmData (presumably including the
// GPU copies made by CopyG4HepEmDataToGPU — confirm in G4HepEm docs), then
// deletes the state object itself. Order matters: free the data first.
static void FreeG4HepEm(G4HepEmState *state)
{
FreeG4HepEmData(&state->data);
delete state;
}
// Adapts a RanluxppDouble generator to the G4HepEmRandomEngine interface via
// two static trampoline functions, usable on both host and device. The
// engine stores only a type-erased pointer to the RanluxppDouble, so the
// generator must outlive this wrapper.
class RanluxppDoubleEngine : public G4HepEmRandomEngine {
// Wrapper functions to call into CLHEP::HepRandomEngine.
static __host__ __device__ double flatWrapper(void *object) {
return ((RanluxppDouble*)object)->Rndm();
}
static __host__ __device__ void flatArrayWrapper(void *object, const int size, double* vect) {
for (int i = 0; i < size; i++) {
vect[i] = ((RanluxppDouble*)object)->Rndm();
}
}
public:
__host__ __device__
RanluxppDoubleEngine(RanluxppDouble* engine)
: G4HepEmRandomEngine(/*object=*/engine, &flatWrapper, &flatArrayWrapper) {}
};
// Demo kernel (intended for a <<<1,1>>> launch): transports a single 100 GeV
// electron for at most 200 steps with the G4HepEm electron manager. Each step
// samples the number-of-interactions-left, limits the step (HowFar), applies
// continuous energy loss, and performs the winning discrete interaction
// (ioni / brem / annihilation), printing diagnostics throughout.
// NOTE(review): no secondaries are stacked — energy transfers are simply
// deducted from the primary; positron branches are stubbed out.
__global__ void TransportParticle()
{
RanluxppDouble r;
RanluxppDoubleEngine rnge(&r);
// Init a track.
G4HepEmElectronTrack elTrack;
// To simplify copy&paste...
G4HepEmElectronTrack *theElTrack = &elTrack;
G4HepEmTrack *theTrack = elTrack.GetTrack();
theTrack->SetEKin(100 * GeV);
theTrack->SetMCIndex(1);
const bool isElectron = true;
printf("Starting with %fMeV\n", theTrack->GetEKin());
for (int i = 0; i < 200; i++) {
printf("-----------------------------------------\n");
// Sample the `number-of-interaction-left`.
for (int ip = 0; ip < 3; ++ip) {
if (theTrack->GetNumIALeft(ip) <= 0.) {
theTrack->SetNumIALeft(-std::log(r.Rndm()), ip);
}
}
G4HepEmElectronManager::HowFar(&g4HepEmData, &g4HepEmPars, theElTrack);
printf("sampled process: %d, particle travels %fmm\n", theTrack->GetWinnerProcessIndex(),
theTrack->GetGStepLength());
const int iDProc = theTrack->GetWinnerProcessIndex();
bool stopped = G4HepEmElectronManager::PerformContinuous(&g4HepEmData, &g4HepEmPars, theElTrack);
printf("energy after continuous process: %fMeV\n", theTrack->GetEKin());
if (stopped) {
// call annihilation for e+ !!!
if (!isElectron) {
// FIXME !!!
// PerformPositronAnnihilation(tlData, true);
}
return;
} else if (iDProc < 0) {
// No discrete process or on boundary.
continue;
}
// Reset number of interaction left for the winner discrete process.
// (Will be resampled in the next iteration.)
theTrack->SetNumIALeft(-1.0, iDProc);
// Check if a delta interaction happens instead of the real discrete process.
if (G4HepEmElectronManager::CheckDelta(&g4HepEmData, theTrack, r.Rndm())) {
printf("delta interaction happened!\n");
continue;
}
// TODO: Perform the discrete part of the winner interaction.
const int theMCIndx = theTrack->GetMCIndex();
const double theEkin = theTrack->GetEKin();
const double theLogEkin = theTrack->GetLogEKin();
const double theElCut = g4HepEmData.fTheMatCutData->fMatCutData[theMCIndx].fSecElProdCutE;
switch (iDProc) {
case 0: {
// invoke ioni (for e-/e+):
// PerformElectronIoni(tlData, hepEmData, isElectron);
const double deltaEkin = (isElectron)
? G4HepEmElectronInteractionIoni::SampleETransferMoller(theElCut, theEkin, &rnge)
: G4HepEmElectronInteractionIoni::SampleETransferBhabha(theElCut, theEkin, &rnge);
theTrack->SetEKin(theEkin - deltaEkin);
break;
}
case 1: // invoke brem (for e-/e+): either SB- or Rel-Brem
if (theEkin < g4HepEmPars.fElectronBremModelLim) {
// PerformElectronBremSB(tlData, hepEmData, isElectron);
double deltaEkin = G4HepEmElectronInteractionBrem::SampleETransferSB(&g4HepEmData, theEkin, theLogEkin,
theMCIndx, &rnge, isElectron);
theTrack->SetEKin(theEkin - deltaEkin);
} else {
// PerformElectronBremRB(tlData, hepEmData);
double deltaEkin = G4HepEmElectronInteractionBrem::SampleETransferRB(&g4HepEmData, theEkin, theLogEkin,
theMCIndx, &rnge, isElectron);
theTrack->SetEKin(theEkin - deltaEkin);
}
break;
case 2: // invoke annihilation (in-flight) for e+
// PerformPositronAnnihilation(tlData, false);
break;
}
printf("energy after discrete process: %fMeV\n", theTrack->GetEKin());
}
}
// Host driver: initialize Geant4 + the G4HepEm data on host and device,
// launch the single-particle transport kernel, wait for it, and clean up.
int main()
{
InitGeant4();
G4HepEmState *state = InitG4HepEm();
printf("Launching particle transport on GPU\n");
printf("-----------------------------------------\n");
// One particle => one thread; the kernel prints its own diagnostics.
TransportParticle<<<1, 1>>>();
// Synchronize both to wait for completion and to surface any kernel error.
COPCORE_CUDA_CHECK(cudaDeviceSynchronize());
FreeG4HepEm(state);
return 0;
}
|
4e84cdf8e2cb87c1805c949904435eb558956447.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#ifdef OF_ENABLE_PROFILER
#include <nvtx3/roctracer/roctx.h>
#endif // OF_ENABLE_PROFILER
namespace oneflow {
namespace {
#ifdef OF_ENABLE_PROFILER
static thread_local HashMap<std::string, nvtxRangeId_t> mark2range_id;
#endif
} // namespace
class NvtxOpKernelState final : public user_op::OpKernelState {
public:
NvtxOpKernelState() : counter_(0) {
#ifndef OF_ENABLE_PROFILER
LOG(WARNING) << "To use NVTX, run cmake with -DBUILD_PROFILER=ON";
#endif
}
~NvtxOpKernelState() override = default;
int64_t counter() const { return counter_; }
void IncreaseCount() { counter_ += 1; }
private:
int64_t counter_;
};
class NvtxStartKernel final : public user_op::OpKernel {
public:
NvtxStartKernel() = default;
~NvtxStartKernel() override = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return std::make_shared<NvtxOpKernelState>();
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state);
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const ShapeView& in_shape = in->shape();
CHECK_EQ(out->shape(), in_shape);
const DataType in_data_type = in->data_type();
CHECK_EQ(out->data_type(), in_data_type);
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), out->mut_dptr<void>(), in->dptr<void>(),
in_shape.elem_cnt() * GetSizeOfDataType(in_data_type));
#ifdef OF_ENABLE_PROFILER
const std::string mark_prefix = ctx->user_op_conf().attr<std::string>("mark_prefix");
const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter());
nvtxRangeId_t range_id = roctxRangeStartA(mark.c_str());
CHECK(mark2range_id.emplace(mark, range_id).second);
kernel_state->IncreaseCount();
#endif // OF_ENABLE_PROFILER
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("nvtx_start")
.SetCreateFn<NvtxStartKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == DeviceType::kGPU)
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false));
return Maybe<void>::Ok();
});
// Kernel backing the "nvtx_end" op: closes the profiling range opened by the
// matching "nvtx_start" op (key = "<mark_prefix>-<counter>") and forwards the
// input tensor to the output. The op may also run in-place (see the inplace
// proposal registered below), in which case the copy is a self-copy.
class NvtxEndKernel final : public user_op::OpKernel {
public:
NvtxEndKernel() = default;
~NvtxEndKernel() override = default;
// Fresh per-op counter state; the counter pairs this op's N-th invocation
// with the start op's N-th invocation.
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return std::make_shared<NvtxOpKernelState>();
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state);
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const ShapeView& in_shape = in->shape();
CHECK_EQ(out->shape(), in_shape);
const DataType in_data_type = in->data_type();
CHECK_EQ(out->data_type(), in_data_type);
#ifdef OF_ENABLE_PROFILER
// Rebuild the range key the start kernel registered and close that range.
const std::string mark_prefix = ctx->user_op_conf().attr<std::string>("mark_prefix");
const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter());
// find(mark) instead of find(mark.c_str()): avoids constructing a second
// temporary std::string for the lookup.
auto it = mark2range_id.find(mark);
CHECK(it != mark2range_id.end());
nvtxRangeId_t range_id = it->second;
mark2range_id.erase(it);
roctxRangeStop(range_id);
kernel_state->IncreaseCount();
#endif
// BUG FIX: the in->out copy must run regardless of OF_ENABLE_PROFILER.
// The original only copied inside the #ifdef, so non-profiler builds left
// `out` unwritten, while NvtxStartKernel copies unconditionally.
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), out->mut_dptr<void>(), in->dptr<void>(),
in_shape.elem_cnt() * GetSizeOfDataType(in_data_type));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("nvtx_end")
.SetCreateFn<NvtxEndKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == DeviceType::kGPU)
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false));
return Maybe<void>::Ok();
});
} // namespace oneflow
| 4e84cdf8e2cb87c1805c949904435eb558956447.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#ifdef OF_ENABLE_PROFILER
#include <nvtx3/nvToolsExt.h>
#endif // OF_ENABLE_PROFILER
namespace oneflow {
namespace {
#ifdef OF_ENABLE_PROFILER
static thread_local HashMap<std::string, nvtxRangeId_t> mark2range_id;
#endif
} // namespace
class NvtxOpKernelState final : public user_op::OpKernelState {
public:
NvtxOpKernelState() : counter_(0) {
#ifndef OF_ENABLE_PROFILER
LOG(WARNING) << "To use NVTX, run cmake with -DBUILD_PROFILER=ON";
#endif
}
~NvtxOpKernelState() override = default;
int64_t counter() const { return counter_; }
void IncreaseCount() { counter_ += 1; }
private:
int64_t counter_;
};
class NvtxStartKernel final : public user_op::OpKernel {
public:
NvtxStartKernel() = default;
~NvtxStartKernel() override = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return std::make_shared<NvtxOpKernelState>();
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state);
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const ShapeView& in_shape = in->shape();
CHECK_EQ(out->shape(), in_shape);
const DataType in_data_type = in->data_type();
CHECK_EQ(out->data_type(), in_data_type);
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), out->mut_dptr<void>(), in->dptr<void>(),
in_shape.elem_cnt() * GetSizeOfDataType(in_data_type));
#ifdef OF_ENABLE_PROFILER
const std::string mark_prefix = ctx->user_op_conf().attr<std::string>("mark_prefix");
const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter());
nvtxRangeId_t range_id = nvtxRangeStartA(mark.c_str());
CHECK(mark2range_id.emplace(mark, range_id).second);
kernel_state->IncreaseCount();
#endif // OF_ENABLE_PROFILER
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("nvtx_start")
.SetCreateFn<NvtxStartKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == DeviceType::kGPU)
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false));
return Maybe<void>::Ok();
});
// Kernel backing the "nvtx_end" op: closes the NVTX range opened by the
// matching "nvtx_start" op (key = "<mark_prefix>-<counter>") and forwards the
// input tensor to the output. The op may also run in-place (see the inplace
// proposal registered below), in which case the copy is a self-copy.
class NvtxEndKernel final : public user_op::OpKernel {
public:
NvtxEndKernel() = default;
~NvtxEndKernel() override = default;
// Fresh per-op counter state; the counter pairs this op's N-th invocation
// with the start op's N-th invocation.
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
return std::make_shared<NvtxOpKernelState>();
}
private:
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
auto* kernel_state = dynamic_cast<NvtxOpKernelState*>(state);
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const ShapeView& in_shape = in->shape();
CHECK_EQ(out->shape(), in_shape);
const DataType in_data_type = in->data_type();
CHECK_EQ(out->data_type(), in_data_type);
#ifdef OF_ENABLE_PROFILER
// Rebuild the range key the start kernel registered and close that range.
const std::string mark_prefix = ctx->user_op_conf().attr<std::string>("mark_prefix");
const std::string mark = mark_prefix + "-" + std::to_string(kernel_state->counter());
// find(mark) instead of find(mark.c_str()): avoids constructing a second
// temporary std::string for the lookup.
auto it = mark2range_id.find(mark);
CHECK(it != mark2range_id.end());
nvtxRangeId_t range_id = it->second;
mark2range_id.erase(it);
nvtxRangeEnd(range_id);
kernel_state->IncreaseCount();
#endif
// BUG FIX: the in->out copy must run regardless of OF_ENABLE_PROFILER.
// The original only copied inside the #ifdef, so non-profiler builds left
// `out` unwritten, while NvtxStartKernel copies unconditionally.
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), out->mut_dptr<void>(), in->dptr<void>(),
in_shape.elem_cnt() * GetSizeOfDataType(in_data_type));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("nvtx_end")
.SetCreateFn<NvtxEndKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == DeviceType::kGPU)
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, false));
return Maybe<void>::Ok();
});
} // namespace oneflow
|
21e36b8cfec44fa826347a3935f5e28f321a31e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Convert an interleaved uchar3 image to grayscale: every output channel is
// the equal-weight average of the input's three channels. Expects a 2D launch
// whose grid covers at least imageWidth x imageHeight threads.
__global__ void grayscaleVer2D(uchar3* input, uchar3* output, int imageWidth, int imageHeight){
int tid_x = threadIdx.x + blockIdx.x * blockDim.x;
int tid_y = threadIdx.y + blockIdx.y * blockDim.y;
// BUG FIX: the guard must be >=, not >. With '>' a thread at
// tid_x == imageWidth (or tid_y == imageHeight) slipped through and
// read/wrote one past the valid row/column.
if(tid_x >= imageWidth || tid_y >= imageHeight) return;
int tid = tid_x + tid_y * imageWidth;
output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3;
output[tid].z = output[tid].y = output[tid].x;
}
// Convert an interleaved uchar3 image to grayscale: every output channel is
// the equal-weight average of the input's three channels. Expects a 2D launch
// whose grid covers at least imageWidth x imageHeight threads.
__global__ void grayscaleVer2D(uchar3* input, uchar3* output, int imageWidth, int imageHeight){
int tid_x = threadIdx.x + blockIdx.x * blockDim.x;
int tid_y = threadIdx.y + blockIdx.y * blockDim.y;
// BUG FIX: the guard must be >=, not >. With '>' a thread at
// tid_x == imageWidth (or tid_y == imageHeight) slipped through and
// read/wrote one past the valid row/column.
if(tid_x >= imageWidth || tid_y >= imageHeight) return;
int tid = tid_x + tid_y * imageWidth;
output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3;
output[tid].z = output[tid].y = output[tid].x;
}
c824bf1f660a2b5dd4918e798d88dd788d795eb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////////////
// CPU routines
////////////////////////////////////////////////////////////////////////////////
// CPU reference implementation: writes the sum of idata[0..len) into *odata.
// A zero-length input yields 0.
void reduction_gold(float* odata, float* idata, const unsigned int len)
{
    float acc = 0.0f;
    for (unsigned int k = 0; k < len; ++k) {
        acc += idata[k];
    }
    *odata = acc;
}
////////////////////////////////////////////////////////////////////////////////
// GPU routines
////////////////////////////////////////////////////////////////////////////////
// Block-wise sum reduction: each block sums the blockDim.x elements it loads
// from g_idata into g_odata[blockIdx.x]; the host adds the per-block partials.
// Requires blockDim.x to be a power of two and dynamic shared memory of at
// least blockDim.x * sizeof(float).
__global__ void reduction(float *g_odata, float *g_idata)
{
    // version 1: shared memory
    // dynamically allocated shared memory
    extern __shared__ float temp[];
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    // first, each thread loads one element into this block's shared buffer
    temp[threadIdx.x] = g_idata[tid];
    // next, binary tree reduction over the block-local buffer
    for (int d = blockDim.x >> 1; d > 0; d >>= 1) {
        __syncthreads(); // ensure previous step completed
        // BUG FIX: the partner index is block-local (threadIdx.x + d). The
        // original read temp[tid + d] with the *global* index, which is only
        // correct for block 0 and reads uninitialized / out-of-bounds shared
        // memory for every other block.
        if (threadIdx.x < d) temp[threadIdx.x] += temp[threadIdx.x + d];
    }
    // finally, first thread puts this block's partial sum into global memory
    if (threadIdx.x == 0) g_odata[blockIdx.x] = temp[0];
    // version 2: shuffle (kept from the original for reference; note it uses
    // the legacy mask-less __shfl_down, removed on Volta+ in favor of
    // __shfl_down_sync)
    /*
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    float sum = g_idata[tid];
    // next, we perform binary tree reduction
    for (int d = blockDim.x>>1; d > 0; d >>= 1) {
        __syncthreads(); // ensure previous step completed
        sum+=__shfl_down(sum,d);
    }
    // finally, first thread puts result into global memory
    if (threadIdx.x==0) g_odata[blockIdx.x] = sum; */
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Host driver: builds 512 random whole-number floats, reduces them on the GPU
// (16 blocks of 32 threads, one partial sum per block), sums the partials on
// the host, and compares against the CPU reference.
int main( int argc, const char** argv)
{
int num_elements, num_threads, mem_size, shared_mem_size;
float *h_data, *reference, sum;
float *d_idata, *d_odata;
int nBlock = 16;
// initialise card
findCudaDevice(argc, argv);
num_elements = 512;
num_threads = num_elements;
mem_size = sizeof(float) * num_elements;
// allocate host memory to store the input data
// and initialize to integer values between 0 and 1000
h_data = (float*) malloc(mem_size);
for(int i = 0; i < num_elements; i++)
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
// compute reference solutions
// NOTE(review): `reference` is allocated but never written/read — only
// `sum` is used; presumably a leftover buffer.
reference = (float*) malloc(mem_size);
reduction_gold(&sum, h_data, num_elements);
// allocate device memory input and output arrays
checkCudaErrors( hipMalloc((void**)&d_idata, mem_size) );
checkCudaErrors( hipMalloc((void**)&d_odata, sizeof(float) * nBlock) );
// copy host memory to device input array
checkCudaErrors( hipMemcpy(d_idata, h_data, mem_size,
hipMemcpyHostToDevice) );
// execute the kernel
// NOTE(review): this allocates 512 floats of dynamic shared memory per
// block although each 32-thread block only uses 32 — harmless but oversized.
shared_mem_size = sizeof(float) * num_elements;
hipLaunchKernelGGL(( reduction), dim3(nBlock),dim3(num_threads/nBlock),shared_mem_size, 0, d_odata,d_idata);
getLastCudaError("reduction kernel execution failed");
// copy result from device to host (blocking copy doubles as the sync point)
checkCudaErrors( hipMemcpy(h_data, d_odata, sizeof(float)*nBlock,
hipMemcpyDeviceToHost) );
// check results
// NOTE(review): accumulating float partials into an int truncates; this is
// exact here only because every input is a whole number (floorf above).
int result = 0;
for (int i = 0;i<nBlock;i++){
result+=h_data[i];
}
printf("reduction error = %f\n",result-sum);
// cleanup memory
free(h_data);
free(reference);
checkCudaErrors( hipFree(d_idata) );
checkCudaErrors( hipFree(d_odata) );
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
}
| c824bf1f660a2b5dd4918e798d88dd788d795eb3.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////////////
// CPU routines
////////////////////////////////////////////////////////////////////////////////
// CPU reference implementation: writes the sum of idata[0..len) into *odata.
// A zero-length input yields 0.
void reduction_gold(float* odata, float* idata, const unsigned int len)
{
    float acc = 0.0f;
    for (unsigned int k = 0; k < len; ++k) {
        acc += idata[k];
    }
    *odata = acc;
}
////////////////////////////////////////////////////////////////////////////////
// GPU routines
////////////////////////////////////////////////////////////////////////////////
// Block-wise sum reduction: each block sums the blockDim.x elements it loads
// from g_idata into g_odata[blockIdx.x]; the host adds the per-block partials.
// Requires blockDim.x to be a power of two and dynamic shared memory of at
// least blockDim.x * sizeof(float).
__global__ void reduction(float *g_odata, float *g_idata)
{
    // version 1: shared memory
    // dynamically allocated shared memory
    extern __shared__ float temp[];
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    // first, each thread loads one element into this block's shared buffer
    temp[threadIdx.x] = g_idata[tid];
    // next, binary tree reduction over the block-local buffer
    for (int d = blockDim.x >> 1; d > 0; d >>= 1) {
        __syncthreads(); // ensure previous step completed
        // BUG FIX: the partner index is block-local (threadIdx.x + d). The
        // original read temp[tid + d] with the *global* index, which is only
        // correct for block 0 and reads uninitialized / out-of-bounds shared
        // memory for every other block.
        if (threadIdx.x < d) temp[threadIdx.x] += temp[threadIdx.x + d];
    }
    // finally, first thread puts this block's partial sum into global memory
    if (threadIdx.x == 0) g_odata[blockIdx.x] = temp[0];
    // version 2: shuffle (kept from the original for reference; note it uses
    // the legacy mask-less __shfl_down, removed on Volta+ in favor of
    // __shfl_down_sync)
    /*
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    float sum = g_idata[tid];
    // next, we perform binary tree reduction
    for (int d = blockDim.x>>1; d > 0; d >>= 1) {
        __syncthreads(); // ensure previous step completed
        sum+=__shfl_down(sum,d);
    }
    // finally, first thread puts result into global memory
    if (threadIdx.x==0) g_odata[blockIdx.x] = sum; */
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, const char** argv)
{
int num_elements, num_threads, mem_size, shared_mem_size;
float *h_data, *reference, sum;
float *d_idata, *d_odata;
int nBlock = 16;
// initialise card
findCudaDevice(argc, argv);
num_elements = 512;
num_threads = num_elements;
mem_size = sizeof(float) * num_elements;
// allocate host memory to store the input data
// and initialize to integer values between 0 and 1000
h_data = (float*) malloc(mem_size);
for(int i = 0; i < num_elements; i++)
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
// compute reference solutions
reference = (float*) malloc(mem_size);
reduction_gold(&sum, h_data, num_elements);
// allocate device memory input and output arrays
checkCudaErrors( cudaMalloc((void**)&d_idata, mem_size) );
checkCudaErrors( cudaMalloc((void**)&d_odata, sizeof(float) * nBlock) );
// copy host memory to device input array
checkCudaErrors( cudaMemcpy(d_idata, h_data, mem_size,
cudaMemcpyHostToDevice) );
// execute the kernel
shared_mem_size = sizeof(float) * num_elements;
reduction<<<nBlock,num_threads/nBlock,shared_mem_size>>>(d_odata,d_idata);
getLastCudaError("reduction kernel execution failed");
// copy result from device to host
checkCudaErrors( cudaMemcpy(h_data, d_odata, sizeof(float)*nBlock,
cudaMemcpyDeviceToHost) );
// check results
int result = 0;
for (int i = 0;i<nBlock;i++){
result+=h_data[i];
}
printf("reduction error = %f\n",result-sum);
// cleanup memory
free(h_data);
free(reference);
checkCudaErrors( cudaFree(d_idata) );
checkCudaErrors( cudaFree(d_odata) );
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
}
|
a2664223cb96a3c94edcc03a5f8e2a6671495f56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmergetfqmr.cu, normal z -> s, Tue Aug 30 09:38:45 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from tfqmr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_stfqmr_1_kernel(
int num_rows,
int num_cols,
float alpha,
float sigma,
float *v,
float *Au,
float *u_m,
float *pu_m,
float *u_mp1,
float *w,
float *d,
float *Ad )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
u_mp1[ i+j*num_rows ] = u_m[ i+j*num_rows ] - alpha * v[ i+j*num_rows ];
w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ];
d[ i+j*num_rows ] = pu_m[ i+j*num_rows ] + sigma * d[ i+j*num_rows ];
Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u_mp1 = u_mp1 - alpha*v;
w = w - alpha*Au;
d = pu_m + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha float
scalar
@param[in]
sigma float
scalar
@param[in]
v magmaFloat_ptr
vector
@param[in]
Au magmaFloat_ptr
vector
@param[in,out]
u_m magmaFloat_ptr
vector
@param[in,out]
pu_m magmaFloat_ptr
vector
@param[in,out]
u_mp1 magmaFloat_ptr
vector
@param[in,out]
w magmaFloat_ptr
vector
@param[in,out]
d magmaFloat_ptr
vector
@param[in,out]
Ad magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_stfqmr_1(
magma_int_t num_rows,
magma_int_t num_cols,
float alpha,
float sigma,
magmaFloat_ptr v,
magmaFloat_ptr Au,
magmaFloat_ptr u_m,
magmaFloat_ptr pu_m,
magmaFloat_ptr u_mp1,
magmaFloat_ptr w,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_stfqmr_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma,
v, Au, u_m, pu_m, u_mp1, w, d, Ad );
return MAGMA_SUCCESS;
}
__global__ void
magma_stfqmr_2_kernel(
int num_rows,
int num_cols,
float eta,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magmaFloat_ptr x,
magmaFloat_ptr r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ] + eta * d[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ] - eta * Ad[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + eta * d
r = r - eta * Ad
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta float
scalar
@param[in]
d magmaFloat_ptr
vector
@param[in]
Ad magmaFloat_ptr
vector
@param[in,out]
x magmaFloat_ptr
vector
@param[in,out]
r magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_stfqmr_2(
magma_int_t num_rows,
magma_int_t num_cols,
float eta,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magmaFloat_ptr x,
magmaFloat_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_stfqmr_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, d, Ad, x, r );
return MAGMA_SUCCESS;
}
// Element-wise update u_mp1 = w + beta * u_m for a num_rows x num_cols
// multi-vector stored column-major. One thread handles one row across all
// columns; threads beyond num_rows exit immediately.
__global__ void
magma_stfqmr_3_kernel(
    int num_rows,
    int num_cols,
    float beta,
    float *w,
    float *u_m,
    float *u_mp1 )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    for ( int col = 0; col < num_cols; ++col ) {
        const int idx = row + col * num_rows;
        u_mp1[ idx ] = w[ idx ] + beta * u_m[ idx ];
    }
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u_mp1 = w + beta*u_mp1
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta float
scalar
@param[in]
w magmaFloat_ptr
vector
@param[in]
u_m magmaFloat_ptr
vector
@param[in,out]
u_mp1 magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_stfqmr_3(
magma_int_t num_rows,
magma_int_t num_cols,
float beta,
magmaFloat_ptr w,
magmaFloat_ptr u_m,
magmaFloat_ptr u_mp1,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_stfqmr_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, w, u_m, u_mp1 );
return MAGMA_SUCCESS;
}
// Element-wise update for a num_rows x num_cols multi-vector (column-major):
//   v  = Au_new + beta*Au + beta^2 * v
//   Au = Au_new
// One thread per row; each thread walks all columns.
__global__ void
magma_stfqmr_4_kernel(
int num_rows,
int num_cols,
float beta,
float *Au_new,
float *v,
float *Au )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
// Read Au_new once so Au can be overwritten after v is updated.
float tmp = Au_new[ i+j*num_rows ];
v[ i+j*num_rows ] = tmp + beta * Au[ i+j*num_rows ]
+ beta * beta * v[ i+j*num_rows ];
Au[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
v = Au_new + beta*(Au+beta*v);
Au = Au_new
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta float
scalar
@param[in]
Au_new magmaFloat_ptr
vector
@param[in,out]
v magmaFloat_ptr
vector
@param[in,out]
Au magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_stfqmr_4(
magma_int_t num_rows,
magma_int_t num_cols,
float beta,
magmaFloat_ptr Au_new,
magmaFloat_ptr v,
magmaFloat_ptr Au,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_stfqmr_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, Au_new, v, Au );
return MAGMA_SUCCESS;
}
__global__ void
magma_stfqmr_5_kernel(
int num_rows,
int num_cols,
float alpha,
float sigma,
float *v,
float *Au,
float *u_mp1,
float *w,
float *d,
float *Ad )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ];
d[ i+j*num_rows ] = u_mp1[ i+j*num_rows ] + sigma * d[ i+j*num_rows ];
Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
w = w - alpha*Au;
d = pu_m + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha float
scalar
@param[in]
sigma float
scalar
@param[in]
v magmaFloat_ptr
vector
@param[in]
Au magmaFloat_ptr
vector
@param[in,out]
u_mp1 magmaFloat_ptr
vector
@param[in,out]
w magmaFloat_ptr
vector
@param[in,out]
d magmaFloat_ptr
vector
@param[in,out]
Ad magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_stfqmr_5(
magma_int_t num_rows,
magma_int_t num_cols,
float alpha,
float sigma,
magmaFloat_ptr v,
magmaFloat_ptr Au,
magmaFloat_ptr u_mp1,
magmaFloat_ptr w,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_stfqmr_5_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma,
v, Au, u_mp1, w, d, Ad );
return MAGMA_SUCCESS;
}
| a2664223cb96a3c94edcc03a5f8e2a6671495f56.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zmergetfqmr.cu, normal z -> s, Tue Aug 30 09:38:45 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from tfqmr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_stfqmr_1_kernel(
int num_rows,
int num_cols,
float alpha,
float sigma,
float *v,
float *Au,
float *u_m,
float *pu_m,
float *u_mp1,
float *w,
float *d,
float *Ad )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
u_mp1[ i+j*num_rows ] = u_m[ i+j*num_rows ] - alpha * v[ i+j*num_rows ];
w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ];
d[ i+j*num_rows ] = pu_m[ i+j*num_rows ] + sigma * d[ i+j*num_rows ];
Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u_mp1 = u_mp1 - alpha*v;
w = w - alpha*Au;
d = pu_m + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha float
scalar
@param[in]
sigma float
scalar
@param[in]
v magmaFloat_ptr
vector
@param[in]
Au magmaFloat_ptr
vector
@param[in,out]
u_m magmaFloat_ptr
vector
@param[in,out]
pu_m magmaFloat_ptr
vector
@param[in,out]
u_mp1 magmaFloat_ptr
vector
@param[in,out]
w magmaFloat_ptr
vector
@param[in,out]
d magmaFloat_ptr
vector
@param[in,out]
Ad magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_stfqmr_1(
magma_int_t num_rows,
magma_int_t num_cols,
float alpha,
float sigma,
magmaFloat_ptr v,
magmaFloat_ptr Au,
magmaFloat_ptr u_m,
magmaFloat_ptr pu_m,
magmaFloat_ptr u_mp1,
magmaFloat_ptr w,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_stfqmr_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, sigma,
v, Au, u_m, pu_m, u_mp1, w, d, Ad );
return MAGMA_SUCCESS;
}
__global__ void
magma_stfqmr_2_kernel(
int num_rows,
int num_cols,
float eta,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magmaFloat_ptr x,
magmaFloat_ptr r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ] + eta * d[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ] - eta * Ad[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + eta * d
r = r - eta * Ad
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta float
scalar
@param[in]
d magmaFloat_ptr
vector
@param[in]
Ad magmaFloat_ptr
vector
@param[in,out]
x magmaFloat_ptr
vector
@param[in,out]
r magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_stfqmr_2(
magma_int_t num_rows,
magma_int_t num_cols,
float eta,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magmaFloat_ptr x,
magmaFloat_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_stfqmr_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, d, Ad, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_stfqmr_3_kernel(
int num_rows,
int num_cols,
float beta,
float *w,
float *u_m,
float *u_mp1 )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
u_mp1[ i+j*num_rows ] = w[ i+j*num_rows ] + beta * u_m[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u_mp1 = w + beta*u_mp1
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta float
scalar
@param[in]
w magmaFloat_ptr
vector
@param[in]
u_m magmaFloat_ptr
vector
@param[in,out]
u_mp1 magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host launcher for magma_stfqmr_3_kernel: u_mp1 = w + beta * u_m
// over an m-by-n column-major block, one thread per row.
extern "C"
magma_int_t
magma_stfqmr_3(
magma_int_t num_rows,
magma_int_t num_cols,
float beta,
magmaFloat_ptr w,
magmaFloat_ptr u_m,
magmaFloat_ptr u_mp1,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );                              // threads per block
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );   // one thread per row
magma_stfqmr_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, w, u_m, u_mp1 );
// NOTE(review): launch errors are not checked; the routine always reports success.
return MAGMA_SUCCESS;
}
__global__ void
magma_stfqmr_4_kernel(
int num_rows,
int num_cols,
float beta,
float *Au_new,
float *v,
float *Au )
{
// One thread per row of the column-major m-by-n block:
//   v  = Au_new + beta*Au + beta^2*v
//   Au = Au_new
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if ( row >= num_rows )
    return;
for ( int col = 0; col < num_cols; ++col ) {
    const int idx = row + col * num_rows;   // column-major element offset
    const float au_new = Au_new[ idx ];     // read once; Au[idx] is overwritten below
    v[ idx ] = au_new + beta * Au[ idx ] + beta * beta * v[ idx ];
    Au[ idx ] = au_new;
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
v = Au_new + beta*(Au+beta*v);
Au = Au_new
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta float
scalar
@param[in]
Au_new magmaFloat_ptr
vector
@param[in,out]
v magmaFloat_ptr
vector
@param[in,out]
Au magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host launcher for magma_stfqmr_4_kernel, which fuses
//     v  = Au_new + beta*(Au + beta*v)
//     Au = Au_new
// over an m-by-n column-major block, one thread per row.
extern "C"
magma_int_t
magma_stfqmr_4(
magma_int_t num_rows,
magma_int_t num_cols,
float beta,
magmaFloat_ptr Au_new,
magmaFloat_ptr v,
magmaFloat_ptr Au,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );                              // threads per block
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );   // one thread per row
magma_stfqmr_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, Au_new, v, Au );
// NOTE(review): launch errors are not checked; the routine always reports success.
return MAGMA_SUCCESS;
}
__global__ void
magma_stfqmr_5_kernel(
int num_rows,
int num_cols,
float alpha,
float sigma,
float *v,
float *Au,
float *u_mp1,
float *w,
float *d,
float *Ad )
{
// One thread per row of the column-major m-by-n block:
//   w  = w - alpha * Au
//   d  = u_mp1 + sigma * d
//   Ad = Au + sigma * Ad
// NOTE(review): parameter v is accepted but never referenced in this body.
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if ( row >= num_rows )
    return;
for ( int col = 0; col < num_cols; ++col ) {
    const int idx = row + col * num_rows;   // column-major element offset
    w[ idx ]  = w[ idx ] - alpha * Au[ idx ];
    d[ idx ]  = u_mp1[ idx ] + sigma * d[ idx ];
    Ad[ idx ] = Au[ idx ] + sigma * Ad[ idx ];
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
w = w - alpha*Au;
d = pu_m + sigma*d;
Ad = Au + sigma*Ad;
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha float
scalar
@param[in]
sigma float
scalar
@param[in]
v magmaFloat_ptr
vector
@param[in]
Au magmaFloat_ptr
vector
@param[in,out]
u_mp1 magmaFloat_ptr
vector
@param[in,out]
w magmaFloat_ptr
vector
@param[in,out]
d magmaFloat_ptr
vector
@param[in,out]
Ad magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
// Host launcher for magma_stfqmr_5_kernel, which fuses
//     w  = w - alpha*Au
//     d  = u_mp1 + sigma*d
//     Ad = Au + sigma*Ad
// over an m-by-n column-major block, one thread per row.
// NOTE(review): `v` is forwarded to the kernel but unused there.
extern "C"
magma_int_t
magma_stfqmr_5(
magma_int_t num_rows,
magma_int_t num_cols,
float alpha,
float sigma,
magmaFloat_ptr v,
magmaFloat_ptr Au,
magmaFloat_ptr u_mp1,
magmaFloat_ptr w,
magmaFloat_ptr d,
magmaFloat_ptr Ad,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );                              // threads per block
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );   // one thread per row
magma_stfqmr_5_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, sigma,
v, Au, u_mp1, w, d, Ad );
// NOTE(review): launch errors are not checked; the routine always reports success.
return MAGMA_SUCCESS;
}
|
00f5a03f89f2ea7f87842ffe05658cbb86800767.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by bartosz on 12/30/20.
//
#include "kernels_hip.cuh"
#include "raycasting.cuh"
// One thread per pixel (2D launch). The guards handle image dimensions that
// are not a multiple of the block size; each surviving thread delegates the
// actual ray cast for pixel (x, y) to doRayCasting.
__global__
void rayCastingKernel( const TriangleMesh * mesh, PaintScene * scene, const Camera * camera,
const LightSourceSet * lightSources )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if ( x >= scene->width ) return;   // outside image width
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( y >= scene->height ) return;  // outside image height
doRayCasting( x, y, mesh, scene, *camera, *lightSources );
} | 00f5a03f89f2ea7f87842ffe05658cbb86800767.cu | //
// Created by bartosz on 12/30/20.
//
#include "kernels.cuh"
#include "raycasting.cuh"
// One thread per pixel (2D launch). The guards handle image dimensions that
// are not a multiple of the block size; each surviving thread delegates the
// actual ray cast for pixel (x, y) to doRayCasting.
__global__
void rayCastingKernel( const TriangleMesh * mesh, PaintScene * scene, const Camera * camera,
const LightSourceSet * lightSources )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if ( x >= scene->width ) return;   // outside image width
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( y >= scene->height ) return;  // outside image height
doRayCasting( x, y, mesh, scene, *camera, *lightSources );
} |
e6f2eea6c6eaa739f45be684701c769abf1ff1c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define EPS2 0.01f
#define G 0.67f
__device__ float3
bodyBodyInteraction2(float4 bi, float4 bj, float3 ai)
{
// Accumulate into `ai` the gravitational pull of body bj (mass in bj.w)
// on body bi; EPS2 softens the singularity for nearly coincident bodies.
float3 d;
d.x = bj.x - bi.x;
d.y = bj.y - bi.y;
d.z = bj.z - bi.z;
const float distSqr = d.x * d.x + d.y * d.y + d.z * d.z + EPS2;
const float distSixth = distSqr * distSqr * distSqr;
// Cutoff kept from the original behavior: very close pairs contribute nothing.
if (distSixth < 1.0f)
    return ai;
const float invDistCube = 1.0f / sqrtf(distSixth);
const float s = bj.w * invDistCube;
ai.x += d.x * s * G;
ai.y += d.y * s * G;
ai.z += d.z * s * G;
return ai;
}
__global__ void
calculate_forces2(float4 *X, float4 *V, float time)
{
// One block per body (blockIdx.x). Thread t computes the interaction of this
// body with body t into dynamic shared memory; thread 0 then reduces the
// partial accelerations and performs one Euler step on position/velocity.
// Requires blockDim.x == number of bodies and at least
// blockDim.x * sizeof(float3) bytes of dynamic shared memory
// (the launcher run2 passes float4-sized storage, which suffices).
extern __shared__ float3 acceleration[];
float4 myPosition = X[blockIdx.x];
float4 myVelocity = V[blockIdx.x];
float3 acc = { 0.0f, 0.0f, 0.0f };
acceleration[threadIdx.x] = bodyBodyInteraction2(myPosition, X[threadIdx.x], acc);
__syncthreads();   // all partial accelerations must be visible to thread 0
// BUG FIX: the original did `if(threadIdx.x) return;` and later executed a
// second __syncthreads() with only thread 0 still running -- a barrier in
// divergent control flow, which is undefined behavior. Keep all threads
// converged and restrict the serial reduction + update to thread 0 instead.
if (threadIdx.x == 0) {
    for (int i = 0; i < blockDim.x; i++) {
        acc.x += acceleration[i].x;
        acc.y += acceleration[i].y;
        acc.z += acceleration[i].z;
    }
    // V = Vo + a*t
    myVelocity.x += acc.x * time;
    myVelocity.y += acc.y * time;
    myVelocity.z += acc.z * time;
    // S = So + V*t
    myPosition.x += myVelocity.x * time;
    myPosition.y += myVelocity.y * time;
    myPosition.z += myVelocity.z * time;
    X[blockIdx.x] = myPosition;
    V[blockIdx.x] = myVelocity;
}
}
// Host driver: advances all N bodies one Euler step of length `time`.
// Launch shape: one block per body, N threads per block (one thread per
// pairwise interaction), i.e. O(N^2) work overall.
// NOTE(review): requires N <= the device's max threads per block and checks
// no launch/sync errors -- confirm callers only pass small N.
void run2(int N, float time, float4 *X, float4 *V)
{
int NUM_BLOCKS = N;
int NUM_THREADS = N;
// Dynamic shared memory for the per-thread partial accelerations; sized in
// float4 units although the kernel stores float3 (harmless over-allocation).
int SIZE = N * sizeof(float4);
dim3 block(NUM_THREADS, 1, 1);
dim3 grid(NUM_BLOCKS, 1, 1);
hipLaunchKernelGGL(( calculate_forces2), dim3(grid), dim3(block), SIZE, 0, X,V,time);
hipDeviceSynchronize();
}
| e6f2eea6c6eaa739f45be684701c769abf1ff1c8.cu | #define EPS2 0.01f
#define G 0.67f
__device__ float3
bodyBodyInteraction2(float4 bi, float4 bj, float3 ai)
{
// Accumulate into `ai` the gravitational pull of body bj (mass in bj.w)
// on body bi; EPS2 softens the singularity for nearly coincident bodies.
float3 d;
d.x = bj.x - bi.x;
d.y = bj.y - bi.y;
d.z = bj.z - bi.z;
const float distSqr = d.x * d.x + d.y * d.y + d.z * d.z + EPS2;
const float distSixth = distSqr * distSqr * distSqr;
// Cutoff kept from the original behavior: very close pairs contribute nothing.
if (distSixth < 1.0f)
    return ai;
const float invDistCube = 1.0f / sqrtf(distSixth);
const float s = bj.w * invDistCube;
ai.x += d.x * s * G;
ai.y += d.y * s * G;
ai.z += d.z * s * G;
return ai;
}
__global__ void
calculate_forces2(float4 *X, float4 *V, float time)
{
// One block per body (blockIdx.x). Thread t computes the interaction of this
// body with body t into dynamic shared memory; thread 0 then reduces the
// partial accelerations and performs one Euler step on position/velocity.
// Requires blockDim.x == number of bodies and at least
// blockDim.x * sizeof(float3) bytes of dynamic shared memory
// (the launcher run2 passes float4-sized storage, which suffices).
extern __shared__ float3 acceleration[];
float4 myPosition = X[blockIdx.x];
float4 myVelocity = V[blockIdx.x];
float3 acc = { 0.0f, 0.0f, 0.0f };
acceleration[threadIdx.x] = bodyBodyInteraction2(myPosition, X[threadIdx.x], acc);
__syncthreads();   // all partial accelerations must be visible to thread 0
// BUG FIX: the original did `if(threadIdx.x) return;` and later executed a
// second __syncthreads() with only thread 0 still running -- a barrier in
// divergent control flow, which is undefined behavior. Keep all threads
// converged and restrict the serial reduction + update to thread 0 instead.
if (threadIdx.x == 0) {
    for (int i = 0; i < blockDim.x; i++) {
        acc.x += acceleration[i].x;
        acc.y += acceleration[i].y;
        acc.z += acceleration[i].z;
    }
    // V = Vo + a*t
    myVelocity.x += acc.x * time;
    myVelocity.y += acc.y * time;
    myVelocity.z += acc.z * time;
    // S = So + V*t
    myPosition.x += myVelocity.x * time;
    myPosition.y += myVelocity.y * time;
    myPosition.z += myVelocity.z * time;
    X[blockIdx.x] = myPosition;
    V[blockIdx.x] = myVelocity;
}
}
// Host driver: advances all N bodies one Euler step of length `time`.
// Launch shape: one block per body, N threads per block (one thread per
// pairwise interaction), i.e. O(N^2) work overall.
// NOTE(review): requires N <= the device's max threads per block and checks
// no launch/sync errors -- confirm callers only pass small N.
void run2(int N, float time, float4 *X, float4 *V)
{
int NUM_BLOCKS = N;
int NUM_THREADS = N;
// Dynamic shared memory for the per-thread partial accelerations; sized in
// float4 units although the kernel stores float3 (harmless over-allocation).
int SIZE = N * sizeof(float4);
dim3 block(NUM_THREADS, 1, 1);
dim3 grid(NUM_BLOCKS, 1, 1);
calculate_forces2<<< grid, block, SIZE>>>(X,V,time);
cudaDeviceSynchronize();
}
|
139d42b327c29bceb15c88f3db572a5c34a16513.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlarft_kernels.cu, normal z -> s, Wed Jan 2 14:18:51 2019
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define sgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ float shared_data[];
/******************************************************************************/
// Computes one entry of column `step` of the triangular factor T: block
// `thblk` (< step) forms the dot product V(:,thblk)' * V(:,step) via a
// block-wide reduction; block `step` itself writes the diagonal entry.
// Assumes blockDim.x == BLOCK_SIZE (size of the shared reduction buffer).
static __device__
void slarft_gemvcolwise_device(
int m, float *v, float *tau,
float *c, int ldc, float *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
float *dc = c + blockIdx.x * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_CONJ(sum[0]);
#else
tmp = - MAGMA_S_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_S_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_S_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
// Diagonal entry: 1 when T is finished later (gemm/trmv path), else tau.
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_S_ZERO;
}
}
/******************************************************************************/
// Single-matrix launcher shim: offsets all pointers to panel column `step`
// and forwards to the device worker.
__global__
void slarft_gemvcolwise_kernel( int m, float *v, int ldv, float *tau,
float *T, int ldt, int step )
{
slarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
/******************************************************************************/
// Batched launcher shim: blockIdx.z selects the matrix within the batch,
// then applies the same per-matrix offsets as the single-matrix kernel.
__global__
void slarft_gemvcolwise_kernel_batched( int m, float **v_array, int ldv, float **tau_array,
float **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
slarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
/******************************************************************************/
// Host wrapper: computes column `step` of T with step+1 blocks of
// BLOCK_SIZE threads (one block per preceding column plus the diagonal).
extern "C"
void magmablas_slarft_gemvcolwise(
magma_int_t m, magma_int_t step,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( slarft_gemvcolwise_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v, ldv, tau, T, ldt, step);
}
/******************************************************************************/
// Batched host wrapper: same launch as magmablas_slarft_gemvcolwise, with
// one grid slice (grid.z) per matrix in the batch.
extern "C"
void magmablas_slarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
float **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( slarft_gemvcolwise_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v_array, ldv, tau_array, T_array, ldt, step);
}
/******************************************************************************/
// sgemv(y=alpha*A*x) interface: T/W=tau*v*x,
// Row-wise worker for column i of T: thread row ty accumulates the dot
// product of V column ty with x across sgemv_bs lanes, reduces per row in
// sdata, and lane 0 writes the negated result (scaled by tau unless
// use_gemm_larft is defined) into W[ty]. Thread (0,0) also stores tau on
// the diagonal T_ptr[0]. NOTE(review): `ldt` is unused in this body.
static __device__ void
slarft_gemvrowwise_device(
int m, int i,
float *tau,
float *v_ptr, int ldv,
float *x_ptr, int incx,
float *T_ptr, int ldt,
float *W, float* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
float res = MAGMA_S_ZERO;
v_ptr += ldv * ty;
if (tx < sgemv_bs)
{
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0] * (*tau);
}
#endif
}
/******************************************************************************/
// T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
// T(i,i) = tau(i)
// Single-matrix shim: points W at column i of T, hands the dynamic shared
// buffer to the device worker, and offsets v/tau/T to panel column i.
__global__ void
slarft_gemvrowwise_kernel(
int m, int i,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
float *W = T +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
// Batched shim: blockIdx.z selects the matrix, then mirrors the
// single-matrix kernel's pointer offsets.
__global__ void
slarft_gemvrowwise_kernel_batched(
int m, int i,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
float *W = T_array[batchid] +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
// Host wrapper: one block of sgemv_bs x max(i,1) threads plus
// sgemv_bs*(i+1) floats of dynamic shared memory for the reductions.
// NOTE(review): parameter `W` is not forwarded to the kernel (the kernel
// derives W from T internally).
extern "C"
void magmablas_slarft_gemvrowwise(
magma_int_t m, magma_int_t i,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
hipLaunchKernelGGL(( slarft_gemvrowwise_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau, v, ldv, T, ldt);
}
/******************************************************************************/
// Batched host wrapper: same launch shape as the single-matrix version,
// with one grid slice (grid.z) per matrix in the batch.
extern "C"
void magmablas_slarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
/* sgemvrowwise used a bigger shared memory and has more data reuse and performs better
*/
hipLaunchKernelGGL(( slarft_gemvrowwise_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
/*
loop_inside
*/
// Fused loop over columns 1..k-1 of T: for each i, the 2-D thread block
// (sgemv_bs x k) computes the dot products V(i:n,0:i-1)' * V(i:n,i) with a
// per-column shared-memory reduction, then row 0 writes the (optionally
// negated/tau-scaled) results into column i of T. Column 0 only receives
// tau[0] on the diagonal. Requires sgemv_bs*k floats of dynamic shared
// memory (callers pass sgemv_bs*(k+1)).
static __device__ void
slarft_gemv_loop_inside_device(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
float *sdata = (float*)shared_data;
float res;
// write the first element of T (its diagonal is tau[0])
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
float *v_ptr = v;
v_ptr += i;
float *x_ptr = v_ptr + i * ldv;
res = MAGMA_S_ZERO;
if (tx < sgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * sgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * sgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
/******************************************************************************/
// Single-matrix shim for the fused column loop.
__global__ void
slarft_gemv_loop_inside_kernel(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
slarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
// Batched shim: blockIdx.z selects the matrix within the batch.
__global__ void
slarft_gemv_loop_inside_kernel_batched(
int n, int k,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
slarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
/******************************************************************************/
// Host wrapper: one block of sgemv_bs x max(k,1) threads and
// sgemv_bs*(k+1) floats of dynamic shared memory.
extern "C"
void magmablas_slarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
hipLaunchKernelGGL(( slarft_gemv_loop_inside_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
// Batched host wrapper: same launch shape, one grid.z slice per matrix.
extern "C"
void magmablas_slarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
hipLaunchKernelGGL(( slarft_gemv_loop_inside_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
// In-shared-memory trmv sequence that finalizes the trailing k columns of
// the n-by-n triangular T; one thread per row (callers launch max(m,1)
// threads with n*n floats of dynamic shared memory).
// NOTE(review): assumes blockDim.x == n so every row is covered -- confirm
// with callers.
static __device__ void
slarft_strmv_sm32x32_device(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
// this routine apply a sequence of trmv to update k column of the triangular
// T starting at n-k to n where T is of size n by n and where the first n-k
// columns of T are supposed updated previously.
// So the routine load all of T nxn to the shared memory
// and apply the sequence of trmv.
// to update a certain column i, threads go in horizontal fashion where
// every thread read one row and do it gemv(dot) to generate
// one element of the column of T then move to the next column
// read T into shared
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_S_ZERO;
if (tx < i)
{
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
// write back the updated block of k column of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
/******************************************************************************/
// Single-matrix shim for the shared-memory trmv update.
__global__ void
slarft_strmv_sm32x32_kernel(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
slarft_strmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
// Batched shim: blockIdx.z selects the matrix within the batch.
__global__ void
slarft_strmv_sm32x32_kernel_batched(
int n, int k, float **tau_array,
float **Tin_array, int ldtin, float **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
slarft_strmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
/******************************************************************************/
// Host wrapper: one block of max(m,1) threads, m*m floats of dynamic
// shared memory (the device worker stages the whole m-by-m T there).
extern "C"
void magmablas_slarft_strmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Tin, magma_int_t ldtin,
float *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
hipLaunchKernelGGL(( slarft_strmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
// Batched host wrapper: same launch shape, one grid.z slice per matrix.
extern "C"
void magmablas_slarft_strmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Tin_array, magma_int_t ldtin,
float **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
hipLaunchKernelGGL(( slarft_strmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
/******************************************************************************/
// Recursive-variant update: Trec(:,i) = -tau[i] * (Trec(:,i) +
// Trec(:,0:i-1) * Ttri(0:i-1,i)), staged through shared memory with one
// thread per row of Trec.
// NOTE(review): shared rows are indexed with stride n (`tx + s*n`) while
// the launchers use max(m,1) threads and m*n floats of shared memory;
// this is only consistent when m == n -- confirm callers never pass m != n.
static __device__ void
slarft_recstrmv_sm32x32_device(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
// to update a certain column i, threads go in horizontal fashion where
// every thread read one row and do it gemv(dot) to generate
// one element of the column of T then move to the next column
// read T into shared
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_S_ZERO;
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
__syncthreads(); // to remove (translated from the original French note "a enlever")
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
// write back the updated block of k column of T multiplying by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
/******************************************************************************/
// Single-matrix shim for the recursive-variant update.
__global__ void
slarft_recstrmv_sm32x32_kernel(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
slarft_recstrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
// Batched shim: blockIdx.z selects the matrix within the batch.
__global__ void
slarft_recstrmv_sm32x32_kernel_batched(
int m, int n, float **tau_array,
float **Trec_array, int ldtrec, float **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
slarft_recstrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
/******************************************************************************/
// Host wrapper: one block of max(m,1) threads, m*n floats of dynamic
// shared memory for the staged block of Trec.
extern "C"
void magmablas_slarft_recstrmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Trec, magma_int_t ldtrec,
float *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
hipLaunchKernelGGL(( slarft_recstrmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
// Batched host wrapper: same launch shape, one grid.z slice per matrix.
extern "C"
void magmablas_slarft_recstrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Trec_array, magma_int_t ldtrec,
float **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
hipLaunchKernelGGL(( slarft_recstrmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
| 139d42b327c29bceb15c88f3db572a5c34a16513.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlarft_kernels.cu, normal z -> s, Wed Jan 2 14:18:51 2019
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define sgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ float shared_data[];
/******************************************************************************/
static __device__
void slarft_gemvcolwise_device(
int m, float *v, float *tau,
float *c, int ldc, float *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
float *dc = c + blockIdx.x * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_CONJ(sum[0]);
#else
tmp = - MAGMA_S_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_S_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_S_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_S_ZERO;
}
}
/******************************************************************************/
__global__
void slarft_gemvcolwise_kernel( int m, float *v, int ldv, float *tau,
float *T, int ldt, int step )
{
slarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
/******************************************************************************/
__global__
void slarft_gemvcolwise_kernel_batched( int m, float **v_array, int ldv, float **tau_array,
float **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
slarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvcolwise(
magma_int_t m, magma_int_t step,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
slarft_gemvcolwise_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v, ldv, tau, T, ldt, step);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
float **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
slarft_gemvcolwise_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v_array, ldv, tau_array, T_array, ldt, step);
}
/******************************************************************************/
// sgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
slarft_gemvrowwise_device(
int m, int i,
float *tau,
float *v_ptr, int ldv,
float *x_ptr, int incx,
float *T_ptr, int ldt,
float *W, float* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
float res = MAGMA_S_ZERO;
v_ptr += ldv * ty;
if (tx < sgemv_bs)
{
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0] * (*tau);
}
#endif
}
/******************************************************************************/
// T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
// T(i,i) = tau(i)
__global__ void
slarft_gemvrowwise_kernel(
int m, int i,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
float *W = T +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
__global__ void
slarft_gemvrowwise_kernel_batched(
int m, int i,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
float *W = T_array[batchid] +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvrowwise(
magma_int_t m, magma_int_t i,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
slarft_gemvrowwise_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
/* sgemvrowwise used a bigger shared memory and has more data reuse and performs better
*/
slarft_gemvrowwise_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
/*
loop_inside
*/
static __device__ void
slarft_gemv_loop_inside_device(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
float *sdata = (float*)shared_data;
float res;
// write the first elment
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
float *v_ptr = v;
v_ptr += i;
float *x_ptr = v_ptr + i * ldv;
res = MAGMA_S_ZERO;
if (tx < sgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * sgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * sgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
/******************************************************************************/
__global__ void
slarft_gemv_loop_inside_kernel(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
slarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
__global__ void
slarft_gemv_loop_inside_kernel_batched(
int n, int k,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
slarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
slarft_gemv_loop_inside_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau, v, ldv, T, ldt);
}
/******************************************************************************/
extern "C"
void magmablas_slarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
slarft_gemv_loop_inside_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau_array, v_array, ldv, T_array, ldt);
}
/******************************************************************************/
// Updates the trailing k columns (columns n-k .. n-1) of the n-by-n
// triangular factor T by a sequence of in-shared-memory trmv operations.
// The first n-k columns of T are assumed already final.
//
// Execution contract (set by the host wrappers below): one thread block,
// blockDim.x threads = one thread per row of T, and sizeof(float)*n*n bytes
// of dynamic shared memory (`shared_data`). Thread tx owns row tx.
static __device__ void
slarft_strmv_sm32x32_device(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
// this routine apply a sequence of trmv to update k column of the triangular
// T starting at n-k to n where T is of size n by n and where the first n-k
// columns of T are supposed updated previously.
// So the routine load all of T nxn to the shared memory
// and apply the sequence of trmv.
// to update a certain column i, threads go in horizontal fashion where
// every thread read one row and do it gemv(dot) to generate
// one element of the column of T then move to the next column
// read T into shared (leading dimension n; column s starts at s*n)
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
// gemm-based variant: the trailing columns arrive pre-scaled, so seed the
// diagonal with tau[s] and negate/scale the off-diagonal entries here.
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
// NOTE: the __syncthreads() at the top of the first iteration also orders
// the shared-memory loads above before any thread reads them.
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_S_ZERO;
if (tx < i)
{
// dot product of row tx (columns tx..i-1, already final) with column i
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
// all reads of column i are done; safe to overwrite its upper part
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
// write back the updated block of k column of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
/******************************************************************************/
// Single-matrix kernel entry point for the shared-memory trmv update.
// The host wrapper launches one block with one thread per row and
// sizeof(float)*m*m bytes of dynamic shared memory.
__global__ void
slarft_strmv_sm32x32_kernel(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
slarft_strmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
// Batched kernel entry point: blockIdx.z selects the matrix of the batch,
// each z-slice applies the same trmv update to its own (tau, Tin, Tout).
__global__ void
slarft_strmv_sm32x32_kernel_batched(
int n, int k, float **tau_array,
float **Tin_array, int ldtin, float **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
slarft_strmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
/******************************************************************************/
// Host wrapper: one block, one thread per row of the m-by-m factor, and the
// whole m-by-m matrix staged in dynamic shared memory.
extern "C"
void magmablas_slarft_strmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Tin, magma_int_t ldtin,
float *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
    const size_t shmem_bytes = sizeof(float) * (m * m);
    dim3 block_dim( max(m, 1), 1, 1 );
    dim3 grid_dim( 1 );
    slarft_strmv_sm32x32_kernel
        <<< grid_dim, block_dim, shmem_bytes, queue->cuda_stream() >>>
        (m, n, tau, Tin, ldtin, Tout, ldtout);
}
/******************************************************************************/
// Batched host wrapper: identical per-matrix configuration, with grid.z
// spanning the batch so every z-slice processes one matrix.
extern "C"
void magmablas_slarft_strmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Tin_array, magma_int_t ldtin,
float **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
    const size_t shmem_bytes = sizeof(float) * (m * m);
    dim3 block_dim( max(m, 1), 1, 1 );
    dim3 grid_dim( 1, 1, batchCount );
    slarft_strmv_sm32x32_kernel_batched
        <<< grid_dim, block_dim, shmem_bytes, queue->cuda_stream() >>>
        (m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
/******************************************************************************/
// Recursive-larft update of an m-by-n rectangular block Trec against the
// triangular block Ttri, scaled column-wise by -tau[i]:
//   Trec(:,i) = -tau[i] * ( Trec(:,i) + Trec(:,0:i-1) * Ttri(0:i-1,i) )
//
// Execution contract (set by the host wrappers below): one block with
// blockDim.x == m threads (one per row) and sizeof(float)*m*n bytes of
// dynamic shared memory (`shared_data`). Thread tx owns row tx.
static __device__ void
slarft_recstrmv_sm32x32_device(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
    int tx = threadIdx.x;
    float *sdata = (float*)shared_data;
    float res;

    // to update a certain column i, threads go in horizontal fashion where
    // every thread read one row and do it gemv(dot) to generate
    // one element of the column of T then move to the next column

    // Load the m-by-n block Trec into shared memory.
    // BUGFIX: the shared tile holds m rows (shmem = m*n floats, one thread
    // per row), so its leading dimension is m, not n. Indexing with n
    // overlapped columns whenever m > n and read/wrote out of bounds when
    // m < n; it was only correct for m == n.
    for (int s=0; s < n; s++)
    {
        sdata[tx + s*m] = Trec[tx + s * ldtrec];
    }
    __syncthreads();

    // Sequence of n gemv-like column updates; column i depends only on
    // columns 0..i-1, which were finalized in earlier iterations.
    for (int i=0; i < n; i++)
    {
        res = MAGMA_S_ZERO;
        for (int j=0; j < i; j++)
        {
            res += sdata[tx + j * m] * Ttri[j + i * ldttri];
        }
        // NOTE(review): this barrier looks removable ("a enlever" in the
        // original) since no thread writes the columns read above within
        // this iteration; kept to preserve behavior exactly.
        __syncthreads();
        sdata[tx + i * m] = -tau[i] * (sdata[tx + i * m] + res);
        // Column i must be visible to all threads before iteration i+1 reads it.
        __syncthreads();
    }

    // Write the n updated columns (already scaled by -tau) back to Trec.
    for (int s=0; s < n; s++)
    {
        Trec[tx + s * ldtrec] = sdata[tx + s*m];
    }
}
/******************************************************************************/
// Single-matrix kernel entry point for the rectangular-block update.
// The host wrapper launches one block with one thread per row and
// sizeof(float)*m*n bytes of dynamic shared memory.
__global__ void
slarft_recstrmv_sm32x32_kernel(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
slarft_recstrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
// Batched kernel entry point: blockIdx.z selects the matrix of the batch;
// each z-slice updates its own (tau, Trec, Ttri) triple.
__global__ void
slarft_recstrmv_sm32x32_kernel_batched(
int m, int n, float **tau_array,
float **Trec_array, int ldtrec, float **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
slarft_recstrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
/******************************************************************************/
// Host wrapper: one block, one thread per row of the m-by-n block Trec,
// which is staged entirely in dynamic shared memory (m*n floats).
extern "C"
void magmablas_slarft_recstrmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Trec, magma_int_t ldtrec,
float *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
    const size_t shmem_bytes = sizeof(float) * (m * n);
    dim3 block_dim( max(m, 1), 1, 1 );
    dim3 grid_dim( 1 );
    slarft_recstrmv_sm32x32_kernel
        <<< grid_dim, block_dim, shmem_bytes, queue->cuda_stream() >>>
        (m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
/******************************************************************************/
// Batched host wrapper: per-matrix configuration as above, replicated along
// grid.z so each z-slice handles one matrix of the batch.
extern "C"
void magmablas_slarft_recstrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Trec_array, magma_int_t ldtrec,
float **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
    const size_t shmem_bytes = sizeof(float) * (m * n);
    dim3 block_dim( max(m, 1), 1, 1 );
    dim3 grid_dim( 1, 1, batchCount );
    slarft_recstrmv_sm32x32_kernel_batched
        <<< grid_dim, block_dim, shmem_bytes, queue->cuda_stream() >>>
        (m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
|
f77ff077f0bd0ebd6241f2e7ea0bf7d7b25cbb54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC
Copyright 2017 Daniil Kazantsev
Copyright 2017 Srikanth Nagella, Edoardo Pasca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "shared.h"
#include "dTV_FGP_GPU_core.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
/* CUDA implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case)
* which employs structural similarity of the level sets of two images/volumes, see [1,2]
* The current implementation updates image 1 while image 2 is being fixed.
*
* Input Parameters:
* 1. Noisy image/volume [REQUIRED]
* 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED]
* 3. lambdaPar - regularization parameter [REQUIRED]
* 4. Number of iterations [OPTIONAL]
* 5. epsilon: tolerance constant [OPTIONAL]
* 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] *
* 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL]
* 8. nonneg: nonnegativity constraint (0 is OFF by default) [OPTIONAL]
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the Matlab's codes and papers by
* [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems"
* [2] M. J. Ehrhardt and M. M. Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 1084-1106
*/
#define BLKXSIZE2D 16
#define BLKYSIZE2D 16
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
//struct square { __host__ __device__ float operator()(float x) { return x * x; } };
/************************************************/
/*****************2D modules*********************/
/************************************************/
// Computes the normalized forward-difference gradient of the reference
// image Refd, smoothed by eta:
//   (Refd_x, Refd_y) = grad(Refd) / sqrt(|grad(Refd)|^2 + eta^2)
// Boundary: the neighbor value is taken as 0 at the last row/column, so the
// boundary "gradient" is -Refd[index] (original scheme, preserved).
__global__ void GradNorm_func2D_kernel(float *Refd, float *Refd_x, float *Refd_y, float eta, int N, int M, int ImSize)
{
    float val1, val2, gradX, gradY, magn;
    //calculate each thread global index
    const int xIndex=blockIdx.x*blockDim.x+threadIdx.x;
    const int yIndex=blockIdx.y*blockDim.y+threadIdx.y;
    int index = xIndex + N*yIndex;

    if ((xIndex < N) && (yIndex < M)) {
        /* boundary conditions */
        if (xIndex >= N-1) val1 = 0.0f; else val1 = Refd[(xIndex+1) + N*yIndex];
        if (yIndex >= M-1) val2 = 0.0f; else val2 = Refd[(xIndex) + N*(yIndex + 1)];

        gradX = val1 - Refd[index];
        gradY = val2 - Refd[index];
        // FIX: the original used pow(x,2)/sqrt, which promotes to double in
        // a float kernel; plain multiplies and sqrtf keep the math in
        // single precision.
        magn = gradX*gradX + gradY*gradY;
        magn = sqrtf(magn + eta*eta);
        Refd_x[index] = gradX/magn;
        Refd_y[index] = gradY/magn;
    }
    return;
}
// Pointwise projection of the dual field (R1,R2) onto the orthogonal
// complement of the reference gradient field (Refd_x,Refd_y):
//   R <- R - <R, Refd_grad> * Refd_grad
__global__ void ProjectVect_func2D_kernel(float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize)
{
    const int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const int iy = blockIdx.y*blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;

    const int idx = ix + N*iy;
    /* inner product with the reference field at this pixel */
    const float dot = R1[idx]*Refd_x[idx] + R2[idx]*Refd_y[idx];
    R1[idx] -= dot*Refd_x[idx];
    R2[idx] -= dot*Refd_y[idx];
}
// Objective-gradient step: D = Ad - lambda * div(R), where the discrete
// divergence uses backward differences with zero values outside the image.
__global__ void Obj_dfunc2D_kernel(float *Ad, float *D, float *R1, float *R2, int N, int M, int ImSize, float lambda)
{
    const int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const int iy = blockIdx.y*blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;

    const int idx = ix + N*iy;
    const float left = (ix > 0) ? R1[(ix-1) + N*iy]   : 0.0f;
    const float up   = (iy > 0) ? R2[ix + N*(iy-1)]   : 0.0f;
    D[idx] = Ad[idx] - lambda*(R1[idx] + R2[idx] - left - up);
}
// Dual ascent step: take the forward-difference gradient of D, project it
// onto the orthogonal complement of the reference field, and accumulate
// into P = R + multip * projected_grad(D).
__global__ void Grad_dfunc2D_kernel(float *P1, float *P2, float *D, float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize, float multip)
{
    const int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const int iy = blockIdx.y*blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;

    const int idx = ix + N*iy;
    /* forward differences with zero gradient at the far boundary */
    float dx = (ix < N-1) ? (D[idx] - D[(ix+1) + N*iy]) : 0.0f;
    float dy = (iy < M-1) ? (D[idx] - D[ix + N*(iy+1)]) : 0.0f;

    const float dot = dx*Refd_x[idx] + dy*Refd_y[idx];
    dx -= dot*Refd_x[idx];
    dy -= dot*Refd_y[idx];

    P1[idx] = R1[idx] + multip*dx;
    P2[idx] = R2[idx] + multip*dy;
}
// Isotropic projection of the dual variable onto the unit ball:
// if |(P1,P2)|^2 > 1, rescale the vector to unit length.
__global__ void Proj_dfunc2D_iso_kernel(float *P1, float *P2, int N, int M, int ImSize)
{
    float denom;
    //calculate each thread global index
    const int xIndex=blockIdx.x*blockDim.x+threadIdx.x;
    const int yIndex=blockIdx.y*blockDim.y+threadIdx.y;
    int index = xIndex + N*yIndex;

    if ((xIndex < N) && (yIndex < M)) {
        // FIX: pow(x,2) promoted to double in a float kernel, and sqrt was
        // evaluated twice; compute the reciprocal norm once in single
        // precision (same pattern as the 3D isotropic kernel).
        denom = P1[index]*P1[index] + P2[index]*P2[index];
        if (denom > 1.0f) {
            float sq_denom = 1.0f/sqrtf(denom);
            P1[index] = P1[index]*sq_denom;
            P2[index] = P2[index]*sq_denom;
        }
    }
    return;
}
// Anisotropic projection: clamp each dual component independently into
// [-1, 1] by dividing by max(1, |P|).
__global__ void Proj_dfunc2D_aniso_kernel(float *P1, float *P2, int N, int M, int ImSize)
{
    float val1, val2;
    //calculate each thread global index
    const int xIndex=blockIdx.x*blockDim.x+threadIdx.x;
    const int yIndex=blockIdx.y*blockDim.y+threadIdx.y;
    int index = xIndex + N*yIndex;

    if ((xIndex < N) && (yIndex < M)) {
        // FIX: use fabsf explicitly — plain abs() on a float relies on C++
        // overload resolution and in C contexts silently truncates to int.
        val1 = fabsf(P1[index]);
        val2 = fabsf(P2[index]);
        if (val1 < 1.0f) {val1 = 1.0f;}
        if (val2 < 1.0f) {val2 = 1.0f;}
        P1[index] = P1[index]/val1;
        P2[index] = P2[index]/val2;
    }
    return;
}
// FISTA-style extrapolation of the dual variables:
//   R = P + multip2 * (P - P_old), elementwise.
// tkp1/tk remain in the signature for interface compatibility; the combined
// factor multip2 = (tk-1)/tkp1 is what the update actually uses.
__global__ void Rupd_dfunc2D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, float multip2, int N, int M, int ImSize)
{
    const int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const int iy = blockIdx.y*blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;

    const int idx = ix + N*iy;
    R1[idx] = P1[idx] + multip2*(P1[idx] - P1_old[idx]);
    R2[idx] = P2[idx] + multip2*(P2[idx] - P2_old[idx]);
}
// Clamps negative values of Output to zero (nonnegativity constraint).
// FIX: the original guarded only on the flat "index < num_total"; with a 2D
// grid that lets threads with xIndex >= N alias elements of the following
// row (index = xIndex + N*yIndex wraps), producing duplicate writes to the
// same element — a same-value but real data race. Guarding both dimensions
// covers exactly the N*M valid pixels once each.
__global__ void dTVnonneg2D_kernel(float* Output, int N, int M, int num_total)
{
    int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
    int index = xIndex + N*yIndex;

    if ((xIndex < N) && (yIndex < M)) {
        if (Output[index] < 0.0f) Output[index] = 0.0f;
    }
}
// Elementwise device-to-device copy: Output = Input (2D layout).
// FIX: guard both dimensions instead of the flat "index < num_total" check;
// the flat check let out-of-row threads (xIndex >= N) alias elements of the
// next row and write them a second time (benign same-value race, removed).
__global__ void dTVcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total)
{
    int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
    int index = xIndex + N*yIndex;

    if ((xIndex < N) && (yIndex < M)) {
        Output[index] = Input[index];
    }
}
// Elementwise device-to-device copy: Output = Input (3D layout).
// FIX: guard all three dimensions instead of the flat "index < num_total"
// check, which let out-of-extent threads alias valid voxels of neighboring
// rows/slices and write them redundantly (same-value race, removed).
__global__ void dTVcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (N*M)*k + i + N*j;

    if ((i < N) && (j < M) && (k < Z)) {
        Output[index] = Input[index];
    }
}
// Elementwise residual: Output = Input1 - Input2 (2D layout), used for the
// tolerance-based stopping criterion.
// FIX: guard both dimensions instead of the flat "index < num_total" check
// (out-of-row threads aliased valid pixels and wrote them twice).
__global__ void dTVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total)
{
    int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
    int index = xIndex + N*yIndex;

    if ((xIndex < N) && (yIndex < M)) {
        Output[index] = Input1[index] - Input2[index];
    }
}
// Elementwise residual: Output = Input1 - Input2 (3D layout).
// FIX: guard all three dimensions instead of the flat "index < num_total"
// check (out-of-extent threads aliased valid voxels and wrote them twice).
__global__ void dTVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (N*M)*k + i + N*j;

    if ((i < N) && (j < M) && (k < Z)) {
        Output[index] = Input1[index] - Input2[index];
    }
}
/************************************************/
/*****************3D modules*********************/
/************************************************/
// 3D analogue of GradNorm_func2D_kernel: normalized forward-difference
// gradient of the reference volume, smoothed by eta.
__global__ void GradNorm_func3D_kernel(float *Refd, float *Refd_x, float *Refd_y, float *Refd_z, float eta, int N, int M, int Z, int ImSize)
{
    float val1, val2, val3, gradX, gradY, gradZ, magn;
    //calculate each thread global index
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (N*M)*k + i + N*j;

    if ((i < N) && (j < M) && (k < Z)) {
        /* boundary conditions: neighbor taken as 0 past the last row/column/slice */
        if (i >= N-1) val1 = 0.0f; else val1 = Refd[(N*M)*k + (i+1) + N*j];
        if (j >= M-1) val2 = 0.0f; else val2 = Refd[(N*M)*k + i + N*(j+1)];
        if (k >= Z-1) val3 = 0.0f; else val3 = Refd[(N*M)*(k+1) + i + N*j];

        gradX = val1 - Refd[index];
        gradY = val2 - Refd[index];
        gradZ = val3 - Refd[index];
        // FIX: pow(x,2)/sqrt promoted to double in a float kernel; use
        // plain multiplies and sqrtf to stay in single precision.
        magn = gradX*gradX + gradY*gradY + gradZ*gradZ;
        magn = sqrtf(magn + eta*eta);
        Refd_x[index] = gradX/magn;
        Refd_y[index] = gradY/magn;
        Refd_z[index] = gradZ/magn;
    }
    return;
}
// Pointwise projection of the dual field (R1,R2,R3) onto the orthogonal
// complement of the reference gradient field (Refd_x,Refd_y,Refd_z).
__global__ void ProjectVect_func3D_kernel(float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;

    const int idx = (N*M)*iz + ix + N*iy;
    /* inner product with the reference field at this voxel */
    const float dot = R1[idx]*Refd_x[idx] + R2[idx]*Refd_y[idx] + R3[idx]*Refd_z[idx];
    R1[idx] -= dot*Refd_x[idx];
    R2[idx] -= dot*Refd_y[idx];
    R3[idx] -= dot*Refd_z[idx];
}
// Objective-gradient step (3D): D = Ad - lambda * div(R), using backward
// differences with zero values outside the volume.
__global__ void Obj_dfunc3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;

    const int idx = (N*M)*iz + ix + N*iy;
    const float v1 = (ix > 0) ? R1[(N*M)*iz + (ix-1) + N*iy] : 0.0f;
    const float v2 = (iy > 0) ? R2[(N*M)*iz + ix + N*(iy-1)] : 0.0f;
    const float v3 = (iz > 0) ? R3[(N*M)*(iz-1) + ix + N*iy] : 0.0f;
    D[idx] = Ad[idx] - lambda*(R1[idx] + R2[idx] + R3[idx] - v1 - v2 - v3);
}
// Dual ascent step (3D): forward-difference gradient of D, projected onto
// the orthogonal complement of the reference field, accumulated into
// P = R + multip * projected_grad(D).
__global__ void Grad_dfunc3D_kernel(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize, float multip)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;

    const int idx = (N*M)*iz + ix + N*iy;
    /* forward differences with zero gradient at the far boundary */
    float dx = (ix < N-1) ? (D[idx] - D[(N*M)*iz + (ix+1) + N*iy]) : 0.0f;
    float dy = (iy < M-1) ? (D[idx] - D[(N*M)*iz + ix + N*(iy+1)]) : 0.0f;
    float dz = (iz < Z-1) ? (D[idx] - D[(N*M)*(iz+1) + ix + N*iy]) : 0.0f;

    const float dot = dx*Refd_x[idx] + dy*Refd_y[idx] + dz*Refd_z[idx];
    dx -= dot*Refd_x[idx];
    dy -= dot*Refd_y[idx];
    dz -= dot*Refd_z[idx];

    P1[idx] = R1[idx] + multip*dx;
    P2[idx] = R2[idx] + multip*dy;
    P3[idx] = R3[idx] + multip*dz;
}
// Isotropic projection of the dual variable onto the unit ball (3D):
// if |(P1,P2,P3)|^2 > 1, rescale the vector to unit length.
__global__ void Proj_dfunc3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    float denom,sq_denom;
    //calculate each thread global index
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (N*M)*k + i + N*j;

    if ((i < N) && (j < M) && (k < Z)) {
        // FIX: pow(x,2)/sqrt promoted to double in a float kernel; use
        // plain multiplies and sqrtf to stay in single precision.
        denom = P1[index]*P1[index] + P2[index]*P2[index] + P3[index]*P3[index];
        if (denom > 1.0f) {
            sq_denom = 1.0f/sqrtf(denom);
            P1[index] = P1[index]*sq_denom;
            P2[index] = P2[index]*sq_denom;
            P3[index] = P3[index]*sq_denom;
        }
    }
    return;
}
// Anisotropic projection (3D): clamp each dual component independently into
// [-1, 1] by dividing by max(1, |P|).
__global__ void Proj_dfunc3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    float val1, val2, val3;
    //calculate each thread global index
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (N*M)*k + i + N*j;

    if ((i < N) && (j < M) && (k < Z)) {
        // FIX: use fabsf explicitly — plain abs() on a float relies on C++
        // overload resolution and in C contexts silently truncates to int.
        val1 = fabsf(P1[index]);
        val2 = fabsf(P2[index]);
        val3 = fabsf(P3[index]);
        if (val1 < 1.0f) {val1 = 1.0f;}
        if (val2 < 1.0f) {val2 = 1.0f;}
        if (val3 < 1.0f) {val3 = 1.0f;}
        P1[index] = P1[index]/val1;
        P2[index] = P2[index]/val2;
        P3[index] = P3[index]/val3;
    }
    return;
}
// FISTA-style extrapolation of the dual variables (3D):
//   R = P + multip2 * (P - P_old), elementwise.
// tkp1/tk remain in the signature for interface compatibility; only the
// combined factor multip2 = (tk-1)/tkp1 is used.
__global__ void Rupd_dfunc3D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, float multip2, int N, int M, int Z, int ImSize)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;

    const int idx = (N*M)*iz + ix + N*iy;
    R1[idx] = P1[idx] + multip2*(P1[idx] - P1_old[idx]);
    R2[idx] = P2[idx] + multip2*(P2[idx] - P2_old[idx]);
    R3[idx] = P3[idx] + multip2*(P3[idx] - P3_old[idx]);
}
// Clamps negative values of Output to zero (nonnegativity constraint, 3D).
// FIX: the flat "index < num_total" guard let out-of-extent threads alias
// valid voxels of neighboring rows/slices and clamp them redundantly (a
// same-value data race). Guarding all three dimensions covers exactly the
// N*M*Z valid voxels once each.
__global__ void dTVnonneg3D_kernel(float* Output, int N, int M, int Z, int num_total)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    int index = (N*M)*k + i + N*j;

    if ((i < N) && (j < M) && (k < Z)) {
        if (Output[index] < 0.0f) Output[index] = 0.0f;
    }
}
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
////////////MAIN HOST FUNCTION ///////////////
/* Host driver for the FGP-dTV denoiser.
 *
 * Allocates all device buffers, precomputes the normalized gradient of the
 * reference image, then runs the FISTA (FGP) iteration:
 *   project duals -> objective gradient -> optional nonneg clamp ->
 *   dual ascent -> unit-ball projection -> momentum update.
 * Every 5th iteration (when epsil != 0) a relative-change norm is computed
 * with Thrust; after 4 sub-tolerance checks the loop stops early.
 *
 * Inputs:  Input (noisy image), InputRef (reference image), lambdaPar,
 *          iter, epsil, eta, methodTV (0=iso, 1=aniso/l1), nonneg,
 *          dimX/dimY/dimZ (dimZ <= 1 selects the 2D path).
 * Outputs: Output (denoised image), infovector[0]=iterations done,
 *          infovector[1]=last computed tolerance.
 * Returns 0 on success, -1 if no GPU device is present.
 */
extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iter, float epsil, float eta, int methodTV, int nonneg, int dimX, int dimY, int dimZ)
{
int deviceCount = -1; // number of devices
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No CUDA devices found\n");
return -1;
}
int count = 0, i;
float re, multip,multip2;
re = 0.0f;
float tk = 1.0f;      // FISTA momentum parameter t_k
float tkp1=1.0f;      // t_{k+1}
if (dimZ <= 1) {
/* 2D version */
int ImSize = dimX*dimY;
float *d_input, *d_update=NULL, *d_update_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *d_InputRef=NULL;
dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D);
dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D));
/*allocate space for images on device*/
checkCudaErrors( hipMalloc((void**)&d_input,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&d_update,ImSize*sizeof(float)) );
if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P1,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P2,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P1_prev,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P2_prev,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&R1,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&R2,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&d_InputRef,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&InputRef_x,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&InputRef_y,ImSize*sizeof(float)) );
checkCudaErrors( hipMemcpy(d_input,Input,ImSize*sizeof(float),hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),hipMemcpyHostToDevice));
hipMemset(P1, 0, ImSize*sizeof(float));
hipMemset(P2, 0, ImSize*sizeof(float));
hipMemset(P1_prev, 0, ImSize*sizeof(float));
hipMemset(P2_prev, 0, ImSize*sizeof(float));
hipMemset(R1, 0, ImSize*sizeof(float));
hipMemset(R2, 0, ImSize*sizeof(float));
hipMemset(InputRef_x, 0, ImSize*sizeof(float));
hipMemset(InputRef_y, 0, ImSize*sizeof(float));
/******************** Run CUDA 2D kernel here ********************/
// step size for the dual ascent; 1/(8*lambda) is the 2D Lipschitz bound
multip = (1.0f/(8.0f*lambdaPar));
/* calculate gradient vectors for the reference */
hipLaunchKernelGGL(( GradNorm_func2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_InputRef, InputRef_x, InputRef_y, eta, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* The main kernel */
for (i = 0; i < iter; i++) {
// snapshot the current estimate for the tolerance check
// NOTE(review): at i==0 d_update has not been written yet, so this first
// snapshot copies uninitialized memory and the first residual check may
// be meaningless — confirm intended.
if ((epsil != 0.0f) && (i % 5 == 0)) {
hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
}
/*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/
hipLaunchKernelGGL(( ProjectVect_func2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* computing the gradient of the objective function */
hipLaunchKernelGGL(( Obj_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, R1, R2, dimX, dimY, ImSize, lambdaPar);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
if (nonneg != 0) {
hipLaunchKernelGGL(( dTVnonneg2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() ); }
/*Taking a step towards minus of the gradient*/
hipLaunchKernelGGL(( Grad_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, d_update, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize, multip);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* projection step */
if (methodTV == 0)hipLaunchKernelGGL(( Proj_dfunc2D_iso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, dimX, dimY, ImSize); /*isotropic TV*/
elsehipLaunchKernelGGL(( Proj_dfunc2D_aniso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, dimX, dimY, ImSize); /*anisotropic TV*/
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
// FISTA momentum coefficient update (host-side scalar math)
tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
multip2 = ((tk-1.0f)/tkp1);
hipLaunchKernelGGL(( Rupd_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, multip2, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, P2, P2_prev, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
tk = tkp1;
if ((epsil != 0.0f) && (i % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
// P1 is reused here as scratch for the residual d_update - d_update_prev
hipLaunchKernelGGL(( dTVResidCalc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, P1, dimX, dimY, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(P1, P1 + ImSize);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + ImSize);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm: relative change ||u_k - u_{k-5}|| / ||u_k||
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
/***************************************************************/
//copy result matrix from device to host memory
hipMemcpy(Output,d_update,ImSize*sizeof(float),hipMemcpyDeviceToHost);
hipFree(d_input);
hipFree(d_update);
if (epsil != 0.0f) hipFree(d_update_prev);
hipFree(P1);
hipFree(P2);
hipFree(P1_prev);
hipFree(P2_prev);
hipFree(R1);
hipFree(R2);
hipFree(d_InputRef);
hipFree(InputRef_x);
hipFree(InputRef_y);
}
else {
/* 3D version */
int ImSize = dimX*dimY*dimZ;
float *d_input, *d_update=NULL, *d_update_prev, *P1=NULL, *P2=NULL, *P3=NULL, *P1_prev=NULL, *P2_prev=NULL, *P3_prev=NULL, *R1=NULL, *R2=NULL, *R3=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *InputRef_z=NULL, *d_InputRef=NULL;
dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE);
dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE),idivup(dimZ,BLKZSIZE));
/*allocate space for images on device*/
checkCudaErrors( hipMalloc((void**)&d_input,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&d_update,ImSize*sizeof(float)) );
if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P1,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P2,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P3,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P1_prev,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P2_prev,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&P3_prev,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&R1,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&R2,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&R3,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&d_InputRef,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&InputRef_x,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&InputRef_y,ImSize*sizeof(float)) );
checkCudaErrors( hipMalloc((void**)&InputRef_z,ImSize*sizeof(float)) );
checkCudaErrors( hipMemcpy(d_input,Input,ImSize*sizeof(float),hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),hipMemcpyHostToDevice));
hipMemset(P1, 0, ImSize*sizeof(float));
hipMemset(P2, 0, ImSize*sizeof(float));
hipMemset(P3, 0, ImSize*sizeof(float));
hipMemset(P1_prev, 0, ImSize*sizeof(float));
hipMemset(P2_prev, 0, ImSize*sizeof(float));
hipMemset(P3_prev, 0, ImSize*sizeof(float));
hipMemset(R1, 0, ImSize*sizeof(float));
hipMemset(R2, 0, ImSize*sizeof(float));
hipMemset(R3, 0, ImSize*sizeof(float));
hipMemset(InputRef_x, 0, ImSize*sizeof(float));
hipMemset(InputRef_y, 0, ImSize*sizeof(float));
hipMemset(InputRef_z, 0, ImSize*sizeof(float));
/********************** Run CUDA 3D kernel here ********************/
// step size for the dual ascent; 1/(26*lambda) is the 3D bound used here
multip = (1.0f/(26.0f*lambdaPar));
/* calculate gradient vectors for the reference */
hipLaunchKernelGGL(( GradNorm_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_InputRef, InputRef_x, InputRef_y, InputRef_z, eta, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* The main kernel */
for (i = 0; i < iter; i++) {
if ((epsil != 0.0f) && (i % 5 == 0)) {
hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
}
/*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/
hipLaunchKernelGGL(( ProjectVect_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* computing the gradient of the objective function */
hipLaunchKernelGGL(( Obj_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, R1, R2, R3, dimX, dimY, dimZ, ImSize, lambdaPar);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
if (nonneg != 0) {
hipLaunchKernelGGL(( dTVnonneg3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() ); }
/*Taking a step towards minus of the gradient*/
hipLaunchKernelGGL(( Grad_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, d_update, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize, multip);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
/* projection step */
if (methodTV == 0)hipLaunchKernelGGL(( Proj_dfunc3D_iso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */
elsehipLaunchKernelGGL(( Proj_dfunc3D_aniso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
// FISTA momentum coefficient update (host-side scalar math)
tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
multip2 = ((tk-1.0f)/tkp1);
hipLaunchKernelGGL(( Rupd_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, multip2, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P2, P2_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P3, P3_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
tk = tkp1;
if ((epsil != 0.0f) && (i % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
// P1 is reused here as scratch for the residual d_update - d_update_prev
hipLaunchKernelGGL(( dTVResidCalc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, P1, dimX, dimY, dimZ, ImSize);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(P1, P1 + ImSize);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + ImSize);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm: relative change ||u_k - u_{k-5}|| / ||u_k||
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
/***************************************************************/
//copy result matrix from device to host memory
hipMemcpy(Output,d_update,ImSize*sizeof(float),hipMemcpyDeviceToHost);
hipFree(d_input);
hipFree(d_update);
if (epsil != 0.0f) hipFree(d_update_prev);
hipFree(P1);
hipFree(P2);
hipFree(P3);
hipFree(P1_prev);
hipFree(P2_prev);
hipFree(P3_prev);
hipFree(R1);
hipFree(R2);
hipFree(R3);
hipFree(InputRef_x);
hipFree(InputRef_y);
hipFree(InputRef_z);
hipFree(d_InputRef);
}
/*adding info into info_vector */
infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
hipDeviceSynchronize();
return 0;
}
| f77ff077f0bd0ebd6241f2e7ea0bf7d7b25cbb54.cu | /*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC
Copyright 2017 Daniil Kazantsev
Copyright 2017 Srikanth Nagella, Edoardo Pasca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "shared.h"
#include "dTV_FGP_GPU_core.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
/* CUDA implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case)
* which employs structural similarity of the level sets of two images/volumes, see [1,2]
* The current implementation updates image 1 while image 2 is being fixed.
*
* Input Parameters:
* 1. Noisy image/volume [REQUIRED]
* 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED]
* 3. lambdaPar - regularization parameter [REQUIRED]
* 4. Number of iterations [OPTIONAL]
* 5. eplsilon: tolerance constant [OPTIONAL]
* 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] *
* 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL]
* 8. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL]
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the Matlab's codes and papers by
* [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems"
* [2] M. J. Ehrhardt and M. M. Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 1084–1106
*/
#define BLKXSIZE2D 16
#define BLKYSIZE2D 16
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
//struct square { __host__ __device__ float operator()(float x) { return x * x; } };
/************************************************/
/*****************2D modules*********************/
/************************************************/
__global__ void GradNorm_func2D_kernel(float *Refd, float *Refd_x, float *Refd_y, float eta, int N, int M, int ImSize)
{
    /* Computes the eta-smoothed, normalised forward-difference gradient of the
     * reference image Refd (row-major, N columns x M rows):
     *   Refd_x = dx / sqrt(dx^2 + dy^2 + eta^2), Refd_y = dy / (same magnitude).
     * The forward difference uses 0 beyond the last row/column, so at the
     * boundary the gradient component is -Refd[index].
     * Fix: all arithmetic kept in single precision (explicit squares, sqrtf)
     * instead of double-precision pow()/sqrt(), which promoted every thread's
     * computation to double. */
    const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    if ((xIndex < N) && (yIndex < M)) {
        const int index = xIndex + N * yIndex;
        /* boundary conditions: zero outside the image */
        const float val1 = (xIndex >= N - 1) ? 0.0f : Refd[(xIndex + 1) + N * yIndex];
        const float val2 = (yIndex >= M - 1) ? 0.0f : Refd[xIndex + N * (yIndex + 1)];
        const float gradX = val1 - Refd[index];
        const float gradY = val2 - Refd[index];
        const float magn = sqrtf(gradX * gradX + gradY * gradY + eta * eta);
        Refd_x[index] = gradX / magn;
        Refd_y[index] = gradY / magn;
    }
    return;
}
__global__ void ProjectVect_func2D_kernel(float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize)
{
    /* Projects the 2D dual field (R1,R2) onto the orthogonal complement of the
     * reference gradient field (Refd_x,Refd_y): R <- R - <R,Refd> * Refd. */
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;
    const int idx = ix + N * iy;
    /* per-pixel inner product of the two vector fields */
    const float dot = R1[idx] * Refd_x[idx] + R2[idx] * Refd_y[idx];
    R1[idx] -= dot * Refd_x[idx];
    R2[idx] -= dot * Refd_y[idx];
}
__global__ void Obj_dfunc2D_kernel(float *Ad, float *D, float *R1, float *R2, int N, int M, int ImSize, float lambda)
{
    /* Gradient-of-objective step: D = Ad - lambda * div(R), where the discrete
     * divergence uses backward differences with zero terms at the low boundary. */
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;
    const int idx = ix + N * iy;
    const float left = (ix > 0) ? R1[(ix - 1) + N * iy] : 0.0f;
    const float up   = (iy > 0) ? R2[ix + N * (iy - 1)] : 0.0f;
    D[idx] = Ad[idx] - lambda * (R1[idx] + R2[idx] - left - up);
}
__global__ void Grad_dfunc2D_kernel(float *P1, float *P2, float *D, float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize, float multip)
{
    /* Dual ascent step: forward-difference gradient of D, projected onto the
     * orthogonal complement of the reference gradient field, scaled by multip
     * and added to the current duals R -> P. */
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;
    const int idx = ix + N * iy;
    /* forward differences; zero at the high boundary */
    float gx = (ix >= N - 1) ? 0.0f : D[idx] - D[(ix + 1) + N * iy];
    float gy = (iy >= M - 1) ? 0.0f : D[idx] - D[ix + N * (iy + 1)];
    const float dot = gx * Refd_x[idx] + gy * Refd_y[idx];
    gx -= dot * Refd_x[idx];
    gy -= dot * Refd_y[idx];
    P1[idx] = R1[idx] + multip * gx;
    P2[idx] = R2[idx] + multip * gy;
}
__global__ void Proj_dfunc2D_iso_kernel(float *P1, float *P2, int N, int M, int ImSize)
{
    /* Isotropic projection of the dual pair (P1,P2) onto the unit Euclidean
     * ball, per pixel: if |P| > 1 then P <- P / |P|.
     * Fixes: single-precision squares and sqrtf() instead of double pow()/sqrt(),
     * and the square root is computed once and reused (the original evaluated
     * sqrt(denom) twice; the 3D sibling kernel already uses the single-sqrt form). */
    const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    if ((xIndex < N) && (yIndex < M)) {
        const int index = xIndex + N * yIndex;
        const float denom = P1[index] * P1[index] + P2[index] * P2[index];
        if (denom > 1.0f) {
            const float sq_denom = 1.0f / sqrtf(denom);
            P1[index] = P1[index] * sq_denom;
            P2[index] = P2[index] * sq_denom;
        }
    }
    return;
}
__global__ void Proj_dfunc2D_aniso_kernel(float *P1, float *P2, int N, int M, int ImSize)
{
    /* Anisotropic projection: each dual component is clipped independently to
     * [-1, 1] by dividing by max(|P|, 1).
     * Fix: use fabsf() for the float absolute value instead of abs(); if only
     * the C abs(int) overload is in scope, abs() silently truncates the float
     * argument to int, which would zero out all |P| < 1. */
    const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    if ((xIndex < N) && (yIndex < M)) {
        const int index = xIndex + N * yIndex;
        float val1 = fabsf(P1[index]);
        float val2 = fabsf(P2[index]);
        if (val1 < 1.0f) { val1 = 1.0f; }
        if (val2 < 1.0f) { val2 = 1.0f; }
        P1[index] = P1[index] / val1;
        P2[index] = P2[index] / val2;
    }
    return;
}
__global__ void Rupd_dfunc2D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, float multip2, int N, int M, int ImSize)
{
    /* FISTA momentum update: R = P + multip2 * (P - P_old).
     * (tkp1 and tk are part of the launch signature but unused here; the host
     * precomputes multip2 = (tk - 1) / tkp1.) */
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= N || iy >= M) return;
    const int idx = ix + N * iy;
    R1[idx] = P1[idx] + multip2 * (P1[idx] - P1_old[idx]);
    R2[idx] = P2[idx] + multip2 * (P2[idx] - P2_old[idx]);
}
__global__ void dTVnonneg2D_kernel(float* Output, int N, int M, int num_total)
{
    /* Clamp negative values to zero (non-negativity constraint), flat-indexed. */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int idx = ix + N * iy;
    if (idx < num_total && Output[idx] < 0.0f)
        Output[idx] = 0.0f;
}
__global__ void dTVcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total)
{
    /* Element-wise copy Input -> Output over a 2D launch (row-major flattening). */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int idx = ix + N * iy;
    if (idx >= num_total) return;
    Output[idx] = Input[idx];
}
__global__ void dTVcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total)
{
    /* Element-wise copy Input -> Output over a 3D launch (slice-major flattening). */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    const int idx = (N * M) * iz + ix + N * iy;
    if (idx >= num_total) return;
    Output[idx] = Input[idx];
}
__global__ void dTVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total)
{
    /* Element-wise residual: Output = Input1 - Input2 (used for the stopping rule). */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int idx = ix + N * iy;
    if (idx >= num_total) return;
    Output[idx] = Input1[idx] - Input2[idx];
}
__global__ void dTVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total)
{
    /* Element-wise residual in 3D: Output = Input1 - Input2. */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    const int idx = (N * M) * iz + ix + N * iy;
    if (idx >= num_total) return;
    Output[idx] = Input1[idx] - Input2[idx];
}
/************************************************/
/*****************3D modules*********************/
/************************************************/
__global__ void GradNorm_func3D_kernel(float *Refd, float *Refd_x, float *Refd_y, float *Refd_z, float eta, int N, int M, int Z, int ImSize)
{
    /* eta-smoothed, normalised forward-difference gradient of the 3D reference
     * volume: Refd_{x,y,z} = grad / sqrt(|grad|^2 + eta^2).
     * Forward differences use 0 beyond the last index in each dimension.
     * Fix: all-float arithmetic (explicit squares, sqrtf) instead of the
     * double-precision pow()/sqrt() chain. */
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if ((i < N) && (j < M) && (k < Z)) {
        int index = (N * M) * k + i + N * j;
        /* boundary conditions: zero outside the volume */
        const float val1 = (i >= N - 1) ? 0.0f : Refd[(N * M) * k + (i + 1) + N * j];
        const float val2 = (j >= M - 1) ? 0.0f : Refd[(N * M) * k + i + N * (j + 1)];
        const float val3 = (k >= Z - 1) ? 0.0f : Refd[(N * M) * (k + 1) + i + N * j];
        const float gradX = val1 - Refd[index];
        const float gradY = val2 - Refd[index];
        const float gradZ = val3 - Refd[index];
        const float magn = sqrtf(gradX * gradX + gradY * gradY + gradZ * gradZ + eta * eta);
        Refd_x[index] = gradX / magn;
        Refd_y[index] = gradY / magn;
        Refd_z[index] = gradZ / magn;
    }
    return;
}
__global__ void ProjectVect_func3D_kernel(float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize)
{
    /* Projects the 3D dual field (R1,R2,R3) onto the orthogonal complement of
     * the reference gradient field: R <- R - <R,Refd> * Refd, per voxel. */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;
    const int idx = (N * M) * iz + ix + N * iy;
    const float dot = R1[idx] * Refd_x[idx] + R2[idx] * Refd_y[idx] + R3[idx] * Refd_z[idx];
    R1[idx] -= dot * Refd_x[idx];
    R2[idx] -= dot * Refd_y[idx];
    R3[idx] -= dot * Refd_z[idx];
}
__global__ void Obj_dfunc3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda)
{
    /* Gradient-of-objective step in 3D: D = Ad - lambda * div(R), backward
     * differences with zero terms at the low boundary of each dimension. */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;
    const int idx = (N * M) * iz + ix + N * iy;
    const float bx = (ix > 0) ? R1[(N * M) * iz + (ix - 1) + N * iy] : 0.0f;
    const float by = (iy > 0) ? R2[(N * M) * iz + ix + N * (iy - 1)] : 0.0f;
    const float bz = (iz > 0) ? R3[(N * M) * (iz - 1) + ix + N * iy] : 0.0f;
    D[idx] = Ad[idx] - lambda * (R1[idx] + R2[idx] + R3[idx] - bx - by - bz);
}
__global__ void Grad_dfunc3D_kernel(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize, float multip)
{
    /* Dual ascent step in 3D: forward-difference gradient of D, projected onto
     * the orthogonal complement of the reference gradient field, scaled by
     * multip and added to the current duals R -> P. */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;
    const int idx = (N * M) * iz + ix + N * iy;
    /* forward differences; zero at the high boundary */
    float gx = (ix >= N - 1) ? 0.0f : D[idx] - D[(N * M) * iz + (ix + 1) + N * iy];
    float gy = (iy >= M - 1) ? 0.0f : D[idx] - D[(N * M) * iz + ix + N * (iy + 1)];
    float gz = (iz >= Z - 1) ? 0.0f : D[idx] - D[(N * M) * (iz + 1) + ix + N * iy];
    const float dot = gx * Refd_x[idx] + gy * Refd_y[idx] + gz * Refd_z[idx];
    gx -= dot * Refd_x[idx];
    gy -= dot * Refd_y[idx];
    gz -= dot * Refd_z[idx];
    P1[idx] = R1[idx] + multip * gx;
    P2[idx] = R2[idx] + multip * gy;
    P3[idx] = R3[idx] + multip * gz;
}
__global__ void Proj_dfunc3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    /* Isotropic projection of the dual triple (P1,P2,P3) onto the unit
     * Euclidean ball, per voxel: if |P| > 1 then P <- P / |P|.
     * Fix: single-precision squares and sqrtf() instead of double-precision
     * pow()/sqrt(), which promoted the per-voxel computation to double. */
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if ((i < N) && (j < M) && (k < Z)) {
        int index = (N * M) * k + i + N * j;
        const float denom = P1[index] * P1[index] + P2[index] * P2[index] + P3[index] * P3[index];
        if (denom > 1.0f) {
            const float sq_denom = 1.0f / sqrtf(denom);  /* one sqrt, reused for all three components */
            P1[index] = P1[index] * sq_denom;
            P2[index] = P2[index] * sq_denom;
            P3[index] = P3[index] * sq_denom;
        }
    }
    return;
}
__global__ void Proj_dfunc3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize)
{
    /* Anisotropic projection: each dual component is clipped independently to
     * [-1, 1] by dividing by max(|P|, 1).
     * Fix: use fabsf() for the float absolute value instead of abs(); if only
     * the C abs(int) overload is in scope, abs() silently truncates the float
     * argument to int, which would zero out all |P| < 1. */
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int k = blockDim.z * blockIdx.z + threadIdx.z;
    if ((i < N) && (j < M) && (k < Z)) {
        int index = (N * M) * k + i + N * j;
        float val1 = fabsf(P1[index]);
        float val2 = fabsf(P2[index]);
        float val3 = fabsf(P3[index]);
        if (val1 < 1.0f) { val1 = 1.0f; }
        if (val2 < 1.0f) { val2 = 1.0f; }
        if (val3 < 1.0f) { val3 = 1.0f; }
        P1[index] = P1[index] / val1;
        P2[index] = P2[index] / val2;
        P3[index] = P3[index] / val3;
    }
    return;
}
__global__ void Rupd_dfunc3D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, float multip2, int N, int M, int Z, int ImSize)
{
    /* FISTA momentum update in 3D: R = P + multip2 * (P - P_old).
     * (tkp1 and tk are part of the launch signature but unused here; the host
     * precomputes multip2 = (tk - 1) / tkp1.) */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iy >= M || iz >= Z) return;
    const int idx = (N * M) * iz + ix + N * iy;
    R1[idx] = P1[idx] + multip2 * (P1[idx] - P1_old[idx]);
    R2[idx] = P2[idx] + multip2 * (P2[idx] - P2_old[idx]);
    R3[idx] = P3[idx] + multip2 * (P3[idx] - P3_old[idx]);
}
__global__ void dTVnonneg3D_kernel(float* Output, int N, int M, int Z, int num_total)
{
    /* Clamp negative voxels to zero (non-negativity constraint), flat-indexed. */
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    const int idx = (N * M) * iz + ix + N * iy;
    if (idx < num_total && Output[idx] < 0.0f)
        Output[idx] = 0.0f;
}
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
////////////MAIN HOST FUNCTION ///////////////
/* Host driver for FGP-dTV (FISTA Gradient Projection, directional TV)
 * denoising/regularisation. Dispatches to a 2D path (dimZ <= 1) or a 3D path.
 * Inputs/outputs are host buffers; all work arrays are device-allocated here
 * and freed before returning. On success returns 0 and writes
 * infovector[0] = iterations performed, infovector[1] = last tolerance value.
 * Returns -1 if no CUDA device is present.
 * NOTE(review): every kernel launch is followed by cudaDeviceSynchronize(),
 * which serialises the pipeline — acceptable for a batch solver, but the syncs
 * could be dropped in release builds. */
extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iter, float epsil, float eta, int methodTV, int nonneg, int dimX, int dimY, int dimZ)
{
int deviceCount = -1; // number of devices
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No CUDA devices found\n");
return -1;
}
/* count: consecutive "tolerance reached" checks; i: iteration index (read after the loop) */
int count = 0, i;
float re, multip,multip2;
re = 0.0f;
/* FISTA momentum scalars */
float tk = 1.0f;
float tkp1=1.0f;
if (dimZ <= 1) {
/*2D verson*/
int ImSize = dimX*dimY;
float *d_input, *d_update=NULL, *d_update_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *d_InputRef=NULL;
dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D);
dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D));
/*allocate space for images on device*/
checkCudaErrors( cudaMalloc((void**)&d_input,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&d_update,ImSize*sizeof(float)) );
/* d_update_prev only needed for the periodic stopping-rule check */
if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P1,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P2,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P1_prev,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P2_prev,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&R1,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&R2,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&d_InputRef,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&InputRef_x,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&InputRef_y,ImSize*sizeof(float)) );
checkCudaErrors( cudaMemcpy(d_input,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),cudaMemcpyHostToDevice));
/* zero-initialise duals and momentum buffers */
cudaMemset(P1, 0, ImSize*sizeof(float));
cudaMemset(P2, 0, ImSize*sizeof(float));
cudaMemset(P1_prev, 0, ImSize*sizeof(float));
cudaMemset(P2_prev, 0, ImSize*sizeof(float));
cudaMemset(R1, 0, ImSize*sizeof(float));
cudaMemset(R2, 0, ImSize*sizeof(float));
cudaMemset(InputRef_x, 0, ImSize*sizeof(float));
cudaMemset(InputRef_y, 0, ImSize*sizeof(float));
/******************** Run CUDA 2D kernel here ********************/
/* dual step size: 1/(8*lambda) is the standard FGP bound for the 2D TV operator */
multip = (1.0f/(8.0f*lambdaPar));
/* calculate gradient vectors for the reference */
GradNorm_func2D_kernel<<<dimGrid,dimBlock>>>(d_InputRef, InputRef_x, InputRef_y, eta, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* The main kernel */
for (i = 0; i < iter; i++) {
/* snapshot the current solution every 5th iteration for the stopping rule */
if ((epsil != 0.0f) && (i % 5 == 0)) {
dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
}
/*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/
ProjectVect_func2D_kernel<<<dimGrid,dimBlock>>>(R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* computing the gradient of the objective function */
Obj_dfunc2D_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, R1, R2, dimX, dimY, ImSize, lambdaPar);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
if (nonneg != 0) {
dTVnonneg2D_kernel<<<dimGrid,dimBlock>>>(d_update, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() ); }
/*Taking a step towards minus of the gradient*/
Grad_dfunc2D_kernel<<<dimGrid,dimBlock>>>(P1, P2, d_update, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize, multip);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* projection step */
if (methodTV == 0) Proj_dfunc2D_iso_kernel<<<dimGrid,dimBlock>>>(P1, P2, dimX, dimY, ImSize); /*isotropic TV*/
else Proj_dfunc2D_aniso_kernel<<<dimGrid,dimBlock>>>(P1, P2, dimX, dimY, ImSize); /*anisotropic TV*/
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* FISTA momentum coefficient update */
tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
multip2 = ((tk-1.0f)/tkp1);
Rupd_dfunc2D_kernel<<<dimGrid,dimBlock>>>(P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, multip2, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(P1, P1_prev, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(P2, P2_prev, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
tk = tkp1;
if ((epsil != 0.0f) && (i % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
/* P1 is reused here as scratch for the residual d_update - d_update_prev */
dTVResidCalc2D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, P1, dimX, dimY, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(P1, P1 + ImSize);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + ImSize);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
/* NOTE(review): re is undefined if ||d_update|| is exactly zero — verify inputs are non-trivial */
re = (reduction/reduction2);
if (re < epsil) count++;
/* stop after 4 consecutive sub-tolerance checks */
if (count > 3) break;
}
}
/***************************************************************/
//copy result matrix from device to host memory
cudaMemcpy(Output,d_update,ImSize*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(d_input);
cudaFree(d_update);
if (epsil != 0.0f) cudaFree(d_update_prev);
cudaFree(P1);
cudaFree(P2);
cudaFree(P1_prev);
cudaFree(P2_prev);
cudaFree(R1);
cudaFree(R2);
cudaFree(d_InputRef);
cudaFree(InputRef_x);
cudaFree(InputRef_y);
}
else {
/*3D verson*/
int ImSize = dimX*dimY*dimZ;
/* NOTE(review): unlike the 2D branch, d_update_prev is not initialised to NULL here;
 * it is only allocated/freed under epsil != 0, so it is never dereferenced
 * uninitialised, but the asymmetry is worth confirming */
float *d_input, *d_update=NULL, *d_update_prev, *P1=NULL, *P2=NULL, *P3=NULL, *P1_prev=NULL, *P2_prev=NULL, *P3_prev=NULL, *R1=NULL, *R2=NULL, *R3=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *InputRef_z=NULL, *d_InputRef=NULL;
dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE);
dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE),idivup(dimZ,BLKZSIZE));
/*allocate space for images on device*/
checkCudaErrors( cudaMalloc((void**)&d_input,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&d_update,ImSize*sizeof(float)) );
if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P1,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P2,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P3,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P1_prev,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P2_prev,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&P3_prev,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&R1,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&R2,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&R3,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&d_InputRef,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&InputRef_x,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&InputRef_y,ImSize*sizeof(float)) );
checkCudaErrors( cudaMalloc((void**)&InputRef_z,ImSize*sizeof(float)) );
checkCudaErrors( cudaMemcpy(d_input,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),cudaMemcpyHostToDevice));
/* zero-initialise duals and momentum buffers */
cudaMemset(P1, 0, ImSize*sizeof(float));
cudaMemset(P2, 0, ImSize*sizeof(float));
cudaMemset(P3, 0, ImSize*sizeof(float));
cudaMemset(P1_prev, 0, ImSize*sizeof(float));
cudaMemset(P2_prev, 0, ImSize*sizeof(float));
cudaMemset(P3_prev, 0, ImSize*sizeof(float));
cudaMemset(R1, 0, ImSize*sizeof(float));
cudaMemset(R2, 0, ImSize*sizeof(float));
cudaMemset(R3, 0, ImSize*sizeof(float));
cudaMemset(InputRef_x, 0, ImSize*sizeof(float));
cudaMemset(InputRef_y, 0, ImSize*sizeof(float));
cudaMemset(InputRef_z, 0, ImSize*sizeof(float));
/********************** Run CUDA 3D kernel here ********************/
/* dual step size for the 3D TV operator */
multip = (1.0f/(26.0f*lambdaPar));
/* calculate gradient vectors for the reference */
GradNorm_func3D_kernel<<<dimGrid,dimBlock>>>(d_InputRef, InputRef_x, InputRef_y, InputRef_z, eta, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* The main kernel */
for (i = 0; i < iter; i++) {
/* snapshot the current solution every 5th iteration for the stopping rule */
if ((epsil != 0.0f) && (i % 5 == 0)) {
dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
}
/*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/
ProjectVect_func3D_kernel<<<dimGrid,dimBlock>>>(R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* computing the gradient of the objective function */
Obj_dfunc3D_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, R1, R2, R3, dimX, dimY, dimZ, ImSize, lambdaPar);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
if (nonneg != 0) {
dTVnonneg3D_kernel<<<dimGrid,dimBlock>>>(d_update, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() ); }
/*Taking a step towards minus of the gradient*/
Grad_dfunc3D_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, d_update, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize, multip);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* projection step */
if (methodTV == 0) Proj_dfunc3D_iso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */
else Proj_dfunc3D_aniso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
/* FISTA momentum coefficient update */
tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
multip2 = ((tk-1.0f)/tkp1);
Rupd_dfunc3D_kernel<<<dimGrid,dimBlock>>>(P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, multip2, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P1, P1_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P2, P2_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P3, P3_prev, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
tk = tkp1;
if ((epsil != 0.0f) && (i % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
/* P1 is reused here as scratch for the residual d_update - d_update_prev */
dTVResidCalc3D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, P1, dimX, dimY, dimZ, ImSize);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(P1, P1 + ImSize);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + ImSize);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
/* stop after 4 consecutive sub-tolerance checks */
if (count > 3) break;
}
}
/***************************************************************/
//copy result matrix from device to host memory
cudaMemcpy(Output,d_update,ImSize*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(d_input);
cudaFree(d_update);
if (epsil != 0.0f) cudaFree(d_update_prev);
cudaFree(P1);
cudaFree(P2);
cudaFree(P3);
cudaFree(P1_prev);
cudaFree(P2_prev);
cudaFree(P3_prev);
cudaFree(R1);
cudaFree(R2);
cudaFree(R3);
cudaFree(InputRef_x);
cudaFree(InputRef_y);
cudaFree(InputRef_z);
cudaFree(d_InputRef);
}
/*adding info into info_vector */
infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
cudaDeviceSynchronize();
return 0;
}
|
c2af2fbf7633f6ef75781664727bbe9f42b36d5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "solver/sgd.h"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
#include "ml_utils.h"
#include <matrix/matrix.h>
#include <linalg/cusolver_wrappers.h>
namespace ML {
namespace Solver {
using namespace MLCommon;
using namespace MLCommon::LinAlg;
template<typename T>
struct SgdInputs {
// Test configuration for the SGD solver tests:
// tol        - tolerance used when comparing fitted coefficients to references
// n_row/n_col   - shape of the linear-regression problem
// n_row2/n_col2 - shape of the classification (logistic/SVM) problems
// batch_size - mini-batch size passed to sgdFit
T tol;
int n_row;
int n_col;
int n_row2;
int n_col2;
int batch_size;
};
template<typename T>
class SgdTest: public ::testing::TestWithParam<SgdInputs<T> > {
protected:
/* Fits two small linear-regression models with SGD on a 4x2 column-major
 * design matrix: first without an intercept (adaptive learning rate),
 * then with an intercept (constant learning rate). Results land in the
 * member buffers coef/coef2, to be compared against coef_ref/coef2_ref
 * elsewhere (presumably in the TEST_P bodies — not visible here).
 * NOTE(review): data/labels/coef* are device buffers allocated here but not
 * freed in this method — assumed to be released in the fixture's TearDown;
 * confirm. The arrays `T data_h[len]` etc. are runtime-sized (VLA), a
 * compiler extension rather than standard C++. */
void linearRegressionTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
/* device allocations (third arg 'true' zero-initialises the coefficients) */
allocate(data, len);
allocate(labels, params.n_row);
allocate(coef, params.n_col, true);
allocate(coef2, params.n_col, true);
allocate(coef_ref, params.n_col);
allocate(coef2_ref, params.n_col);
/* 4x2 design matrix, column-major */
T data_h[len] = { 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0 };
updateDevice(data, data_h, len);
T labels_h[params.n_row] = { 6.0, 8.0, 9.0, 11.0 };
updateDevice(labels, labels_h, params.n_row);
/* expected coefficients without intercept */
T coef_ref_h[params.n_col] = { 2.087, 2.5454557 };
updateDevice(coef_ref, coef_ref_h, params.n_col);
/* expected coefficients with intercept */
T coef2_ref_h[params.n_col] = { 1.000001, 1.9999998 };
updateDevice(coef2_ref, coef2_ref_h, params.n_col);
/* first fit: no intercept, adaptive learning rate */
bool fit_intercept = false;
intercept = T(0);
int epochs = 2000;
T lr = T(0.01);
ML::lr_type lr_type = ML::lr_type::ADAPTIVE;
T power_t = T(0.5);
T alpha = T(0.0001);
T l1_ratio = T(0.15);
bool shuffle = true;
T tol = T(1e-10);
ML::loss_funct loss = ML::loss_funct::SQRD_LOSS;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
int n_iter_no_change = 10;
sgdFit(data, params.n_row, params.n_col, labels, coef, &intercept,
fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss,
pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change,
cublas_handle, cusolver_handle);
/* second fit: with intercept, constant learning rate */
fit_intercept = true;
intercept2 = T(0);
sgdFit(data, params.n_row, params.n_col, labels, coef2, &intercept2,
fit_intercept, params.batch_size, epochs, ML::lr_type::CONSTANT, lr,
power_t, loss, pen, alpha, l1_ratio, shuffle, tol,
n_iter_no_change, cublas_handle, cusolver_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
/* Fits a binary logistic-regression model with SGD (LOG loss, no penalty)
 * on a 4x3 column-major design matrix, then predicts classes for a held-out
 * 4x3 matrix into pred_log; pred_log_ref holds the expected labels
 * (comparison presumably happens in the TEST_P bodies — not visible here).
 * NOTE(review): data_logreg/labels_logreg/pred_log* are device buffers not
 * freed in this method — assumed released in TearDown; confirm. The arrays
 * `T data_h[len]` etc. are runtime-sized (VLA), a compiler extension. */
void logisticRegressionTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row2 * params.n_col2;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
T *coef_class;
allocate(data_logreg, len);
allocate(data_logreg_test, len);
allocate(labels_logreg, params.n_row2);
allocate(coef_class, params.n_col2, true);
allocate(pred_log, params.n_row2);
allocate(pred_log_ref, params.n_row2);
/* training matrix, column-major */
T data_h[len] = { 0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25,
-0.15, -7.35, -7.35 };
updateDevice(data_logreg, data_h, len);
/* held-out matrix for prediction */
T data_test_h[len] = { 0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5,
-1.3, 3.0, -5.0, 15.0 };
updateDevice(data_logreg_test, data_test_h, len);
T labels_logreg_h[params.n_row2] = { 0.0, 1.0, 1.0, 0.0 };
updateDevice(labels_logreg, labels_logreg_h, params.n_row2);
/* expected class predictions on the held-out matrix */
T pred_log_ref_h[params.n_row2] = { 1.0, 0.0, 1.0, 1.0 };
updateDevice(pred_log_ref, pred_log_ref_h, params.n_row2);
/* deterministic setup: no shuffle, zero tolerance, constant learning rate */
bool fit_intercept = true;
T intercept_class = T(0);
int epochs = 1000;
T lr = T(0.05);
ML::lr_type lr_type = ML::lr_type::CONSTANT;
T power_t = T(0.5);
T alpha = T(0.0);
T l1_ratio = T(0.0);
bool shuffle = false;
T tol = T(0.0);
ML::loss_funct loss = ML::loss_funct::LOG;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
int n_iter_no_change = 10;
sgdFit(data_logreg, params.n_row2, params.n_col2, labels_logreg,
coef_class, &intercept_class, fit_intercept, params.batch_size, epochs,
lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol,
n_iter_no_change, cublas_handle, cusolver_handle);
sgdPredictBinaryClass(data_logreg_test, params.n_row2, params.n_col2,
coef_class, intercept_class, pred_log, loss, cublas_handle);
CUDA_CHECK(hipFree(coef_class));
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void svmTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row2 * params.n_col2;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
T *coef_class;
allocate(data_svmreg, len);
allocate(data_svmreg_test, len);
allocate(labels_svmreg, params.n_row2);
allocate(coef_class, params.n_col2, true);
allocate(pred_svm, params.n_row2);
allocate(pred_svm_ref, params.n_row2);
T data_h[len] = { 0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25,
-0.15, -7.35, -7.35 };
updateDevice(data_svmreg, data_h, len);
T data_test_h[len] = { 0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5,
-1.3, 3.0, -5.0, 15.0 };
updateDevice(data_svmreg_test, data_test_h, len);
T labels_svmreg_h[params.n_row2] = { 0.0, 1.0, 1.0, 0.0 };
updateDevice(labels_svmreg, labels_svmreg_h, params.n_row2);
T pred_svm_ref_h[params.n_row2] = { 1.0, 0.0, 1.0, 1.0 };
updateDevice(pred_svm_ref, pred_svm_ref_h, params.n_row2);
bool fit_intercept = true;
T intercept_class = T(0);
int epochs = 1000;
T lr = T(0.05);
ML::lr_type lr_type = ML::lr_type::CONSTANT;
T power_t = T(0.5);
T alpha = T(1) / T(epochs);
T l1_ratio = T(0.0);
bool shuffle = false;
T tol = T(0.0);
ML::loss_funct loss = ML::loss_funct::HINGE;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::L2;
int n_iter_no_change = 10;
sgdFit(data_svmreg, params.n_row2, params.n_col2, labels_svmreg,
coef_class, &intercept_class, fit_intercept, params.batch_size, epochs,
lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol,
n_iter_no_change, cublas_handle, cusolver_handle);
sgdPredictBinaryClass(data_svmreg_test, params.n_row2, params.n_col2,
coef_class, intercept_class, pred_svm, loss, cublas_handle);
CUDA_CHECK(hipFree(coef_class));
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void SetUp() override {
linearRegressionTest();
logisticRegressionTest();
svmTest();
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
CUDA_CHECK(hipFree(coef_ref));
CUDA_CHECK(hipFree(coef2));
CUDA_CHECK(hipFree(coef2_ref));
CUDA_CHECK(hipFree(data_logreg));
CUDA_CHECK(hipFree(data_logreg_test));
CUDA_CHECK(hipFree(labels_logreg));
CUDA_CHECK(hipFree(data_svmreg));
CUDA_CHECK(hipFree(data_svmreg_test));
CUDA_CHECK(hipFree(labels_svmreg));
CUDA_CHECK(hipFree(pred_svm));
CUDA_CHECK(hipFree(pred_svm_ref));
CUDA_CHECK(hipFree(pred_log));
CUDA_CHECK(hipFree(pred_log_ref));
}
protected:
SgdInputs<T> params;
T *data, *labels, *coef, *coef_ref;
T *coef2, *coef2_ref;
T *data_logreg, *data_logreg_test, *labels_logreg;
T *data_svmreg, *data_svmreg_test, *labels_svmreg;
T *pred_svm, *pred_svm_ref, *pred_log, *pred_log_ref;
T intercept, intercept2;
};
const std::vector<SgdInputs<float> > inputsf2 = { { 0.01f, 4, 2, 4, 3, 2 } };
const std::vector<SgdInputs<double> > inputsd2 = { { 0.01, 4, 2, 4, 3, 2 } };
typedef SgdTest<float> SgdTestF;
TEST_P(SgdTestF, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_log_ref, pred_log, params.n_row,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_svm_ref, pred_svm, params.n_row,
CompareApproxAbs<float>(params.tol)));
}
typedef SgdTest<double> SgdTestD;
TEST_P(SgdTestD, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_log_ref, pred_log, params.n_row,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_svm_ref, pred_svm, params.n_row,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestD, ::testing::ValuesIn(inputsd2));
}
} // end namespace ML
| c2af2fbf7633f6ef75781664727bbe9f42b36d5a.cu | #include "solver/sgd.h"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
#include "ml_utils.h"
#include <matrix/matrix.h>
#include <linalg/cusolver_wrappers.h>
namespace ML {
namespace Solver {
using namespace MLCommon;
using namespace MLCommon::LinAlg;
template<typename T>
struct SgdInputs {
T tol;
int n_row;
int n_col;
int n_row2;
int n_col2;
int batch_size;
};
template<typename T>
class SgdTest: public ::testing::TestWithParam<SgdInputs<T> > {
protected:
void linearRegressionTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
allocate(data, len);
allocate(labels, params.n_row);
allocate(coef, params.n_col, true);
allocate(coef2, params.n_col, true);
allocate(coef_ref, params.n_col);
allocate(coef2_ref, params.n_col);
T data_h[len] = { 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0 };
updateDevice(data, data_h, len);
T labels_h[params.n_row] = { 6.0, 8.0, 9.0, 11.0 };
updateDevice(labels, labels_h, params.n_row);
T coef_ref_h[params.n_col] = { 2.087, 2.5454557 };
updateDevice(coef_ref, coef_ref_h, params.n_col);
T coef2_ref_h[params.n_col] = { 1.000001, 1.9999998 };
updateDevice(coef2_ref, coef2_ref_h, params.n_col);
bool fit_intercept = false;
intercept = T(0);
int epochs = 2000;
T lr = T(0.01);
ML::lr_type lr_type = ML::lr_type::ADAPTIVE;
T power_t = T(0.5);
T alpha = T(0.0001);
T l1_ratio = T(0.15);
bool shuffle = true;
T tol = T(1e-10);
ML::loss_funct loss = ML::loss_funct::SQRD_LOSS;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
int n_iter_no_change = 10;
sgdFit(data, params.n_row, params.n_col, labels, coef, &intercept,
fit_intercept, params.batch_size, epochs, lr_type, lr, power_t, loss,
pen, alpha, l1_ratio, shuffle, tol, n_iter_no_change,
cublas_handle, cusolver_handle);
fit_intercept = true;
intercept2 = T(0);
sgdFit(data, params.n_row, params.n_col, labels, coef2, &intercept2,
fit_intercept, params.batch_size, epochs, ML::lr_type::CONSTANT, lr,
power_t, loss, pen, alpha, l1_ratio, shuffle, tol,
n_iter_no_change, cublas_handle, cusolver_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void logisticRegressionTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row2 * params.n_col2;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
T *coef_class;
allocate(data_logreg, len);
allocate(data_logreg_test, len);
allocate(labels_logreg, params.n_row2);
allocate(coef_class, params.n_col2, true);
allocate(pred_log, params.n_row2);
allocate(pred_log_ref, params.n_row2);
T data_h[len] = { 0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25,
-0.15, -7.35, -7.35 };
updateDevice(data_logreg, data_h, len);
T data_test_h[len] = { 0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5,
-1.3, 3.0, -5.0, 15.0 };
updateDevice(data_logreg_test, data_test_h, len);
T labels_logreg_h[params.n_row2] = { 0.0, 1.0, 1.0, 0.0 };
updateDevice(labels_logreg, labels_logreg_h, params.n_row2);
T pred_log_ref_h[params.n_row2] = { 1.0, 0.0, 1.0, 1.0 };
updateDevice(pred_log_ref, pred_log_ref_h, params.n_row2);
bool fit_intercept = true;
T intercept_class = T(0);
int epochs = 1000;
T lr = T(0.05);
ML::lr_type lr_type = ML::lr_type::CONSTANT;
T power_t = T(0.5);
T alpha = T(0.0);
T l1_ratio = T(0.0);
bool shuffle = false;
T tol = T(0.0);
ML::loss_funct loss = ML::loss_funct::LOG;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::NONE;
int n_iter_no_change = 10;
sgdFit(data_logreg, params.n_row2, params.n_col2, labels_logreg,
coef_class, &intercept_class, fit_intercept, params.batch_size, epochs,
lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol,
n_iter_no_change, cublas_handle, cusolver_handle);
sgdPredictBinaryClass(data_logreg_test, params.n_row2, params.n_col2,
coef_class, intercept_class, pred_log, loss, cublas_handle);
CUDA_CHECK(cudaFree(coef_class));
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void svmTest() {
params = ::testing::TestWithParam<SgdInputs<T>>::GetParam();
int len = params.n_row2 * params.n_col2;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
T *coef_class;
allocate(data_svmreg, len);
allocate(data_svmreg_test, len);
allocate(labels_svmreg, params.n_row2);
allocate(coef_class, params.n_col2, true);
allocate(pred_svm, params.n_row2);
allocate(pred_svm_ref, params.n_row2);
T data_h[len] = { 0.1, -2.1, 5.4, 5.4, -1.5, -2.15, 2.65, 2.65, 3.25,
-0.15, -7.35, -7.35 };
updateDevice(data_svmreg, data_h, len);
T data_test_h[len] = { 0.3, 1.1, 2.1, -10.1, 0.5, 2.5, -3.55, -20.5,
-1.3, 3.0, -5.0, 15.0 };
updateDevice(data_svmreg_test, data_test_h, len);
T labels_svmreg_h[params.n_row2] = { 0.0, 1.0, 1.0, 0.0 };
updateDevice(labels_svmreg, labels_svmreg_h, params.n_row2);
T pred_svm_ref_h[params.n_row2] = { 1.0, 0.0, 1.0, 1.0 };
updateDevice(pred_svm_ref, pred_svm_ref_h, params.n_row2);
bool fit_intercept = true;
T intercept_class = T(0);
int epochs = 1000;
T lr = T(0.05);
ML::lr_type lr_type = ML::lr_type::CONSTANT;
T power_t = T(0.5);
T alpha = T(1) / T(epochs);
T l1_ratio = T(0.0);
bool shuffle = false;
T tol = T(0.0);
ML::loss_funct loss = ML::loss_funct::HINGE;
MLCommon::Functions::penalty pen = MLCommon::Functions::penalty::L2;
int n_iter_no_change = 10;
sgdFit(data_svmreg, params.n_row2, params.n_col2, labels_svmreg,
coef_class, &intercept_class, fit_intercept, params.batch_size, epochs,
lr_type, lr, power_t, loss, pen, alpha, l1_ratio, shuffle, tol,
n_iter_no_change, cublas_handle, cusolver_handle);
sgdPredictBinaryClass(data_svmreg_test, params.n_row2, params.n_col2,
coef_class, intercept_class, pred_svm, loss, cublas_handle);
CUDA_CHECK(cudaFree(coef_class));
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void SetUp() override {
linearRegressionTest();
logisticRegressionTest();
svmTest();
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
CUDA_CHECK(cudaFree(coef_ref));
CUDA_CHECK(cudaFree(coef2));
CUDA_CHECK(cudaFree(coef2_ref));
CUDA_CHECK(cudaFree(data_logreg));
CUDA_CHECK(cudaFree(data_logreg_test));
CUDA_CHECK(cudaFree(labels_logreg));
CUDA_CHECK(cudaFree(data_svmreg));
CUDA_CHECK(cudaFree(data_svmreg_test));
CUDA_CHECK(cudaFree(labels_svmreg));
CUDA_CHECK(cudaFree(pred_svm));
CUDA_CHECK(cudaFree(pred_svm_ref));
CUDA_CHECK(cudaFree(pred_log));
CUDA_CHECK(cudaFree(pred_log_ref));
}
protected:
SgdInputs<T> params;
T *data, *labels, *coef, *coef_ref;
T *coef2, *coef2_ref;
T *data_logreg, *data_logreg_test, *labels_logreg;
T *data_svmreg, *data_svmreg_test, *labels_svmreg;
T *pred_svm, *pred_svm_ref, *pred_log, *pred_log_ref;
T intercept, intercept2;
};
const std::vector<SgdInputs<float> > inputsf2 = { { 0.01f, 4, 2, 4, 3, 2 } };
const std::vector<SgdInputs<double> > inputsd2 = { { 0.01, 4, 2, 4, 3, 2 } };
typedef SgdTest<float> SgdTestF;
TEST_P(SgdTestF, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_log_ref, pred_log, params.n_row,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_svm_ref, pred_svm, params.n_row,
CompareApproxAbs<float>(params.tol)));
}
typedef SgdTest<double> SgdTestD;
TEST_P(SgdTestD, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_log_ref, pred_log, params.n_row,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_svm_ref, pred_svm, params.n_row,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(SgdTests, SgdTestD, ::testing::ValuesIn(inputsd2));
}
} // end namespace ML
|
454f0c3c576298b2103b88fb6119b2641ad7dbe0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "internal.h"
namespace {
__global__ void cast_16bit_8bit_array_kernel(const uint16_t* arr16bits, uint8_t* arr8bits, int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr8bits[i] = (uint8_t)arr16bits[i];
}
__global__ void cast_8bit_16bit_array_kernel(const uint8_t* arr8bits, uint16_t* arr16bits, int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr16bits[i] = (uint16_t)arr8bits[i];
}
}
namespace sgm {
namespace details {
void cast_16bit_8bit_array(const uint16_t* arr16bits, uint8_t* arr8bits, int num_elements) {
for (int mod = 1024; mod != 0; mod >>= 1) {
if (num_elements % mod == 0) {
cast_16bit_8bit_array_kernel << <num_elements / mod, mod >> >(arr16bits, arr8bits, num_elements);
break;
}
}
}
void cast_8bit_16bit_array(const uint8_t* arr8bits, uint16_t* arr16bits, int num_elements) {
for (int mod = 1024; mod != 0; mod >>= 1) {
if (num_elements % mod == 0) {
cast_8bit_16bit_array_kernel << <num_elements / mod, mod >> >(arr8bits, arr16bits, num_elements);
break;
}
}
}
}
}
| 454f0c3c576298b2103b88fb6119b2641ad7dbe0.cu | /*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "internal.h"
namespace {
__global__ void cast_16bit_8bit_array_kernel(const uint16_t* arr16bits, uint8_t* arr8bits, int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr8bits[i] = (uint8_t)arr16bits[i];
}
__global__ void cast_8bit_16bit_array_kernel(const uint8_t* arr8bits, uint16_t* arr16bits, int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr16bits[i] = (uint16_t)arr8bits[i];
}
}
namespace sgm {
namespace details {
void cast_16bit_8bit_array(const uint16_t* arr16bits, uint8_t* arr8bits, int num_elements) {
for (int mod = 1024; mod != 0; mod >>= 1) {
if (num_elements % mod == 0) {
cast_16bit_8bit_array_kernel << <num_elements / mod, mod >> >(arr16bits, arr8bits, num_elements);
break;
}
}
}
void cast_8bit_16bit_array(const uint8_t* arr8bits, uint16_t* arr16bits, int num_elements) {
for (int mod = 1024; mod != 0; mod >>= 1) {
if (num_elements % mod == 0) {
cast_8bit_16bit_array_kernel << <num_elements / mod, mod >> >(arr8bits, arr16bits, num_elements);
break;
}
}
}
}
}
|
34ec5a22b6dcbe5f9c2274cdad82bd653f8ae0ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/mask_resize_layer.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype bilinear_interpolate(const Dtype* bottom_data, const int input_height, const int input_width, Dtype inverse_y, Dtype inverse_x) {
// deal with cases that inverse elements are out of feature map boundary
if (inverse_y < -0.5 || inverse_y > input_height - 0.5 || inverse_x < -0.5 || inverse_x > input_width - 0.5) {
return 0.0;
}
if (inverse_y <= 0) inverse_y = 0;
if (inverse_x <= 0) inverse_x = 0;
int h_low = (int) inverse_y;
int w_low = (int) inverse_x;
int h_high;
int w_high;
if (h_low >= input_height - 1) {
h_high = h_low = input_height - 1;
inverse_y = (Dtype) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= input_width - 1) {
w_high = w_low = input_width - 1;
inverse_x = (Dtype) w_low;
} else {
w_high = w_low + 1;
}
Dtype lh = inverse_y - h_low;
Dtype lw = inverse_x - w_low;
Dtype hh = 1 - lh, hw = 1 - lw;
// do bilinear interpolation
Dtype v1 = bottom_data[h_low * input_width + w_low];
Dtype v2 = bottom_data[h_low * input_width + w_high];
Dtype v3 = bottom_data[h_high * input_width + w_low];
Dtype v4 = bottom_data[h_high * input_width + w_high];
Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename Dtype>
__global__ void MaskResizeForward(const int nthreads, const Dtype* bottom_data, const int output_width, const int output_height, const int output_channels, const int input_width, const int input_height, const int input_channels, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) is an element in output mask
int w = index % output_width;
int h = (index / output_width) % output_height;
int c = (index / output_width / output_height) % output_channels;
int n = index / output_width / output_height / output_channels;
Dtype ratio_h = static_cast<Dtype>(input_height) / static_cast<Dtype>(output_height);
Dtype ratio_w = static_cast<Dtype>(input_width) / static_cast<Dtype>(output_width);
Dtype inverse_x = w * ratio_w;
Dtype inverse_y = h * ratio_h;
const Dtype* offset_bottom_data = bottom_data + (n * input_channels + c) * input_height * input_width;
top_data[index] = bilinear_interpolate(offset_bottom_data, input_height, input_width, inverse_y, inverse_x);
}
}
template <typename Dtype>
void MaskResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
hipLaunchKernelGGL(( MaskResizeForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, output_width_, output_height_, output_channels_, input_width_, input_height_, input_channels_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__device__ Dtype getGradientWeight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width)
{
if (argmax_h < -0.5 || argmax_h >(height - 0.5) || argmax_w < -0.5 || argmax_w >(width - 0.5))
{
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
}
else
argmax_h_high = argmax_h_low + 1;
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
}
else
argmax_w_high = argmax_w_low + 1;
Dtype weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
}
else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename Dtype>
__global__ void MaskResizeBackward(const int nthreads, const Dtype* top_diff, const int output_width, const int output_height, const int output_channels, const int input_width, const int input_height, const int input_channels, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) is an element in input mask
int w = index % input_width;
int h = (index / input_width) % input_height;
int c = (index / input_width / input_height) % input_channels;
int n = index / input_width / input_height / input_channels;
Dtype gradient = 0.0;
Dtype ratio_h = static_cast<Dtype>(input_height) / static_cast<Dtype>(output_height);
Dtype ratio_w = static_cast<Dtype>(input_width)/ static_cast<Dtype>(output_width);
Dtype map_x = static_cast<Dtype>(w) / ratio_w;
Dtype map_y = static_cast<Dtype>(h) / ratio_h;
int output_h_start = floor(map_y);
int output_w_start = floor(map_x);
int output_h_end = output_h_start + 1;
int output_w_end = output_w_start + 1;
int offset = (n * output_channels + c) * output_height * output_width;
const Dtype* offset_top_diff = top_diff + offset;
for (int ph = output_h_start; ph <= output_h_end; ++ph) {
for (int pw = output_w_start; pw <= output_w_end; ++pw) {
// map the output index back to feature map index
Dtype iw = static_cast<Dtype>(pw) * ratio_w;
Dtype ih = static_cast<Dtype>(ph) * ratio_h;
// check whether bottom element of this index will affect output element
if (fabs(iw - w) >= 1 || fabs(ih - h) >= 1) {
continue;
}
Dtype weight = getGradientWeight(ih, iw, h, w, input_height, input_width);
gradient += weight * offset_top_diff[ph * output_width + pw];
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void MaskResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
hipLaunchKernelGGL(( MaskResizeBackward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, output_width_, output_height_, output_channels_, input_width_, input_height_, input_channels_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(MaskResizeLayer);
}
| 34ec5a22b6dcbe5f9c2274cdad82bd653f8ae0ba.cu | #include "caffe/layers/mask_resize_layer.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype bilinear_interpolate(const Dtype* bottom_data, const int input_height, const int input_width, Dtype inverse_y, Dtype inverse_x) {
// deal with cases that inverse elements are out of feature map boundary
if (inverse_y < -0.5 || inverse_y > input_height - 0.5 || inverse_x < -0.5 || inverse_x > input_width - 0.5) {
return 0.0;
}
if (inverse_y <= 0) inverse_y = 0;
if (inverse_x <= 0) inverse_x = 0;
int h_low = (int) inverse_y;
int w_low = (int) inverse_x;
int h_high;
int w_high;
if (h_low >= input_height - 1) {
h_high = h_low = input_height - 1;
inverse_y = (Dtype) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= input_width - 1) {
w_high = w_low = input_width - 1;
inverse_x = (Dtype) w_low;
} else {
w_high = w_low + 1;
}
Dtype lh = inverse_y - h_low;
Dtype lw = inverse_x - w_low;
Dtype hh = 1 - lh, hw = 1 - lw;
// do bilinear interpolation
Dtype v1 = bottom_data[h_low * input_width + w_low];
Dtype v2 = bottom_data[h_low * input_width + w_high];
Dtype v3 = bottom_data[h_high * input_width + w_low];
Dtype v4 = bottom_data[h_high * input_width + w_high];
Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename Dtype>
__global__ void MaskResizeForward(const int nthreads, const Dtype* bottom_data, const int output_width, const int output_height, const int output_channels, const int input_width, const int input_height, const int input_channels, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) is an element in output mask
int w = index % output_width;
int h = (index / output_width) % output_height;
int c = (index / output_width / output_height) % output_channels;
int n = index / output_width / output_height / output_channels;
Dtype ratio_h = static_cast<Dtype>(input_height) / static_cast<Dtype>(output_height);
Dtype ratio_w = static_cast<Dtype>(input_width) / static_cast<Dtype>(output_width);
Dtype inverse_x = w * ratio_w;
Dtype inverse_y = h * ratio_h;
const Dtype* offset_bottom_data = bottom_data + (n * input_channels + c) * input_height * input_width;
top_data[index] = bilinear_interpolate(offset_bottom_data, input_height, input_width, inverse_y, inverse_x);
}
}
template <typename Dtype>
void MaskResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
MaskResizeForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> (count, bottom_data, output_width_, output_height_, output_channels_, input_width_, input_height_, input_channels_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__device__ Dtype getGradientWeight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width)
{
if (argmax_h < -0.5 || argmax_h >(height - 0.5) || argmax_w < -0.5 || argmax_w >(width - 0.5))
{
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
}
else
argmax_h_high = argmax_h_low + 1;
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
}
else
argmax_w_high = argmax_w_low + 1;
Dtype weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
}
else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename Dtype>
__global__ void MaskResizeBackward(const int nthreads, const Dtype* top_diff, const int output_width, const int output_height, const int output_channels, const int input_width, const int input_height, const int input_channels, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) is an element in input mask
int w = index % input_width;
int h = (index / input_width) % input_height;
int c = (index / input_width / input_height) % input_channels;
int n = index / input_width / input_height / input_channels;
Dtype gradient = 0.0;
Dtype ratio_h = static_cast<Dtype>(input_height) / static_cast<Dtype>(output_height);
Dtype ratio_w = static_cast<Dtype>(input_width)/ static_cast<Dtype>(output_width);
Dtype map_x = static_cast<Dtype>(w) / ratio_w;
Dtype map_y = static_cast<Dtype>(h) / ratio_h;
int output_h_start = floor(map_y);
int output_w_start = floor(map_x);
int output_h_end = output_h_start + 1;
int output_w_end = output_w_start + 1;
int offset = (n * output_channels + c) * output_height * output_width;
const Dtype* offset_top_diff = top_diff + offset;
for (int ph = output_h_start; ph <= output_h_end; ++ph) {
for (int pw = output_w_start; pw <= output_w_end; ++pw) {
// map the output index back to feature map index
Dtype iw = static_cast<Dtype>(pw) * ratio_w;
Dtype ih = static_cast<Dtype>(ph) * ratio_h;
// check whether bottom element of this index will affect output element
if (fabs(iw - w) >= 1 || fabs(ih - h) >= 1) {
continue;
}
Dtype weight = getGradientWeight(ih, iw, h, w, input_height, input_width);
gradient += weight * offset_top_diff[ph * output_width + pw];
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void MaskResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
MaskResizeBackward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> (count, top_diff, output_width_, output_height_, output_channels_, input_width_, input_height_, input_channels_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(MaskResizeLayer);
}
|
be53ed5e6bedac8e2ffdb2e563e41c3dce4c0517.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
bools[index] = (idata[index] == 0) ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (bools[index] != 0) {
odata[indices[index]] = idata[index];
}
}
}
}
| be53ed5e6bedac8e2ffdb2e563e41c3dce4c0517.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
bools[index] = (idata[index] == 0) ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
if (bools[index] != 0) {
odata[indices[index]] = idata[index];
}
}
}
}
|
b72772505e744be1a4b743b6ff1e4bc4744aa301.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
//#include "sceneStructs.h"
#include "Scene.h"
extern Scene *scene;
#define SHOW_TIMING 0
// Predicate functor for thrust::remove_if: returns true for triangles whose
// keep flag was cleared (back-facing), i.e. the ones to remove.
struct keep
{
__host__ __device__ bool operator()(const Triangle t)
{
return (!t.keep);
}
};
static int width = 0;
static int height = 0;
static int *dev_bufIdx = NULL;
static VertexIn *dev_bufVertex = NULL;
static Triangle *dev_primitives = NULL;
static Edge *dev_edges = NULL;
static Fragment *dev_depthbuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
static glm::mat4 matrix;
static glm::vec3 camDir;
static Light light;
static Camera cam;
//Things added
static VertexOut *dev_outVertex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
// Converts each framebuffer color from [0,1] floats to 8-bit RGB and writes
// it into the OpenGL pixel buffer object (alpha is set to 0).
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// Writes fragment colors to the framebuffer
// Copy each fragment's shaded color out of the depth buffer into the
// framebuffer, one thread per pixel (2D launch, bounds-checked).
__global__
void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
    const int px = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int py = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (px < w && py < h) {
        const int pix = px + (py * w);
        framebuffer[pix] = depthbuffer[pix].color;
    }
}
//Kernel function to figure out vertex in transformes space and NDC
// Vertex shader: stores each vertex's world-space position (transformedPos),
// projects it through the combined camera matrix with perspective divide, and
// scales the result into device coordinates (pos). Normals are transformed by
// the inverse-transpose model matrix. One thread per vertex, 1D launch.
__global__
void kernVertexShader(int numVertices, int w, int h, VertexIn * inVertex, VertexOut *outVertex, Camera cam)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numVertices)
{
glm::vec4 outPoint = glm::vec4(inVertex[index].pos.x, inVertex[index].pos.y, inVertex[index].pos.z, 1.0f);
// World-space position, used later for lighting in the fragment shader.
outVertex[index].transformedPos = multiplyMV(cam.model, outPoint);
outPoint = cam.cameraMatrix * outPoint;
// Perspective divide (skipped only when w is exactly zero).
if(outPoint.w != 0)
outPoint /= outPoint.w;
//In NDC
// outVertex[index].pos = glm::vec3(outPoint);
//In Device Coordinates
outVertex[index].pos.x = outPoint.x * w;
outVertex[index].pos.y = outPoint.y * h;
outVertex[index].pos.z = outPoint.z;
outVertex[index].nor = multiplyMV(cam.inverseTransposeModel, glm::vec4(inVertex[index].nor, 0.0f));
// outVertex[index].col = glm::vec3(0,0,1);
// outVertex[index].nor = inVertex[index].nor;
// printf ("InVertex : %f %f \nOutVertex : %f %f \n\n", inVertex[index].pos.x, inVertex[index].pos.y, outVertex[index].pos.x, outVertex[index].pos.y);
}
}
//Kernel function to assemble triangles
// Primitive assembly: gathers the three shaded (vOut) and raw (vIn) vertices
// of each triangle via the index buffer. With backFaceCulling enabled,
// triangles whose summed vertex normal points along camDir are only marked
// t.keep = false; actual removal happens later via thrust::remove_if.
// NOTE(review): the face normal is summed from outVertex[k_3..k_3+2]
// (sequential vertices) while the gathered vertices use indices[] — these
// only agree when the index buffer is the identity; confirm intended.
__global__
void kernPrimitiveAssembly(int numTriangles, VertexOut *outVertex, VertexIn *inVertex, Triangle *triangles, int* indices, glm::vec3 camDir, bool backFaceCulling)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numTriangles)
{
int k_3 = 3 * index;
Triangle &t = triangles[index];
//Find the triangle normal
glm::vec3 triNor = (outVertex[k_3].nor + outVertex[k_3+1].nor + outVertex[k_3+2].nor);
// printf ("Tri Normal : %f %f %f\n", triNor.x, triNor.y, triNor.z);
// printf ("Cam Dir : %f %f %f\n", camDir.x, camDir.y, camDir.z);
if(backFaceCulling && glm::dot(triNor, camDir) > 0.0f)
{
//Triangle facing away from the camera
// Mark for deletion
t.keep = false;
}
else
{
//Else save it
t.keep = true;
t.vOut[0] = outVertex[indices[k_3]];
t.vOut[1] = outVertex[indices[k_3+1]];
t.vOut[2] = outVertex[indices[k_3+2]];
t.vIn[0] = inVertex[indices[k_3]];
t.vIn[1] = inVertex[indices[k_3+1]];
t.vIn[2] = inVertex[indices[k_3+2]];
}
}
}
//Kernel function to assemble edges
// Edge assembly for wireframe rendering: each triangle writes three edges
// (v0-v1, v1-v2, v2-v0) into the slots named by indices[k_3..k_3+2].
// NOTE(review): endpoints are read from sequential outVertex[k_3..] while
// destination slots use indices[]; with a shared index buffer, later
// triangles overwrite earlier edges — confirm intended.
__global__
void kernEdgeAssembly(int numTriangles, VertexOut *outVertex, Edge *edge, int* indices)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numTriangles)
{
int k_3 = 3 * index;
edge[indices[k_3]].v1 = outVertex[k_3].pos;
edge[indices[k_3]].v2 = outVertex[k_3+1].pos;
edge[indices[k_3+1]].v1 = outVertex[k_3+1].pos;
edge[indices[k_3+1]].v2 = outVertex[k_3+2].pos;
edge[indices[k_3+2]].v1 = outVertex[k_3+2].pos;
edge[indices[k_3+2]].v2 = outVertex[k_3].pos;
}
}
//Kernel function to draw axis
// Overlays debug guides on the fragment buffer: a green vertical line at the
// horizontal center, a red horizontal line at the vertical center, and a
// white one-pixel border around the screen. One thread per pixel (2D launch).
__global__
void kernDrawAxis(int w, int h, Fragment *fragments)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < w && y < h)
    {
        int index = x + (y * w);
        if((x - w*0.5f) == 0)
        {
            // Vertical axis in green.
            fragments[index].color = glm::vec3(0, 1, 0);
        }
        else if((y - h*0.5f) == 0)
        {
            // Horizontal axis in red.
            fragments[index].color = glm::vec3(1, 0, 0);
        }
        else if(x == 0 || x == w-1)
        {
            fragments[index].color = glm::vec3(1);
        }
        // Fix: was `y == h`, which is unreachable under the y < h guard,
        // so the bottom border row was never drawn (left/right/top were).
        else if(y == 0 || y == h-1)
        {
            fragments[index].color = glm::vec3(1);
        }
    }
}
//Kernel function to clear the depth and color buffer
// Resets every per-pixel fragment before a frame: color black, and all four
// subsample slots cleared with their depths set to INT_MAX (farthest) so the
// subsequent atomicMin-based depth test accepts the first covering triangle.
__global__
void kernClearFragmentBuffer(int w, int h, Fragment *fragments)
{
    const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x >= w || y >= h)
        return;
    Fragment &frag = fragments[x + (y * w)];
    const glm::vec3 black(0.0f);
    frag.color = black;
    // Clear all four supersample slots uniformly.
    for (int s = 0; s < 4; ++s)
    {
        frag.depth[s] = INT_MAX;
        frag.c[s] = black;
        frag.primitiveCol[s] = black;
        frag.primitiveNor[s] = black;
        frag.primitivePos[s] = black;
    }
}
//Kernel function to rasterize the triangle
// Rasterizes one triangle per thread: walks the triangle's (clamped) screen
// bounding box, tests coverage with barycentric coordinates — either one
// pixel-center sample or four 0.25-offset samples when antiAliasing is on —
// and records interpolated normal/position/color per winning sample.
// Device coordinates are centered on the screen, hence the +w*0.5 / +h*0.5
// shifts when forming the fragment index.
__global__
void kernRasterizeTraingles(int w, int h, Fragment *fragments, Triangle *triangles, int numTriangles, Camera cam, bool antiAliasing)
{
//Rasterization per triangle
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numTriangles)
{
Triangle &t = triangles[index];
glm::vec3 tri[3];
tri[0] = t.vOut[0].pos;
tri[1] = t.vOut[1].pos;
tri[2] = t.vOut[2].pos;
AABB aabb = getAABBForTriangle(tri);
glm::ivec3 min, max;
//Attempted clipping
min.x = glm::clamp(aabb.min.x, -(float)w*0.5f+1, (float)w*0.5f-1);
min.y = glm::clamp(aabb.min.y, -(float)h*0.5f+1, (float)h*0.5f-1);
max.x = glm::clamp(aabb.max.x, -(float)w*0.5f+1, (float)w*0.5f-1);
max.y = glm::clamp(aabb.max.y, -(float)h*0.5f+1, (float)h*0.5f-1);
for(int i=min.x-1; i<=max.x+1; ++i)
{
for(int j=min.y-1; j<=max.y+1; ++j)
{
glm::vec2 point[4];
int iterCount;
if(antiAliasing)
{
// Four rotated-grid subsamples per pixel for 4x supersampling.
point[0] = glm::vec2(float(i) - 0.25f, float(j) - 0.25f);
point[1] = glm::vec2(float(i) - 0.25f, float(j) + 0.25f);
point[2] = glm::vec2(float(i) + 0.25f, float(j) - 0.25f);
point[3] = glm::vec2(float(i) + 0.25f, float(j) + 0.25f);
iterCount = 4;
}
else
{
point[0] = glm::ivec2(i,j);
iterCount = 1;
}
for(int k=0; k<iterCount; ++k)
{
glm::vec3 barycentric = calculateBarycentricCoordinate(tri, point[k]);
if(isBarycentricCoordInBounds(barycentric))
{
glm::vec3 triIn[3];
VertexIn tvIn[3] = {t.vIn[0], t.vIn[1], t.vIn[2]};
triIn[0] = t.vOut[0].transformedPos;
triIn[1] = t.vOut[1].transformedPos;
triIn[2] = t.vOut[2].transformedPos;
int fragIndex = int((i+w*0.5) + (j + h*0.5)*w);
// Fixed-point depth (scaled by 10000) so atomicMin can arbitrate.
int depth = getZAtCoordinate(barycentric, triIn) * 10000;
//Depth testing
// NOTE(review): the depth slot is claimed with atomicMin, but the
// attribute writes below are not atomic — two threads that both pass
// the compare can interleave their writes; confirm this is acceptable.
if(depth < fragments[fragIndex].depth[k])
{
atomicMin(&fragments[fragIndex].depth[k], depth);
Fragment &f = fragments[fragIndex];
//Fragment shading data
f.primitiveNor[k] = barycentric.x * t.vOut[0].nor +
barycentric.y * t.vOut[1].nor +
barycentric.z * t.vOut[2].nor;
f.primitivePos[k] = barycentric.x * t.vOut[0].transformedPos +
barycentric.y * t.vOut[1].transformedPos +
barycentric.z * t.vOut[2].transformedPos;
f.primitiveCol[k] = barycentric.x * tvIn[0].col +
barycentric.y * tvIn[1].col +
barycentric.z * tvIn[2].col;
}
}
}
}
}
}
}
// Fragment shader: lights each covered pixel with the two scene lights.
// A pixel is covered when any of its four depth slots was written (< INT_MAX).
// With antiAliasing, the four subsample shading results are averaged.
__global__
void kernFragmentShader(int w, int h, Fragment * fragment, Light light1, Light light2, bool antiAliasing)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int fragIndex = x + (y * w);
if (x < w && y < h)
{
Fragment & f = fragment[fragIndex];
if((f.depth[0] < INT_MAX) || (f.depth[1] < INT_MAX) || (f.depth[2] < INT_MAX) || (f.depth[3] < INT_MAX))
{
if(!antiAliasing)
{
f.color = calculateFragColor(f.primitiveNor[0], f.primitivePos[0], f.primitiveCol[0], light1, light2);
}
else
{
f.color = 0.25f * (calculateFragColor(f.primitiveNor[0], f.primitivePos[0], f.primitiveCol[0], light1, light2) +
calculateFragColor(f.primitiveNor[1], f.primitivePos[1], f.primitiveCol[1], light1, light2) +
calculateFragColor(f.primitiveNor[2], f.primitivePos[2], f.primitiveCol[2], light1, light2) +
calculateFragColor(f.primitiveNor[3], f.primitivePos[3], f.primitiveCol[3], light1, light2)
);
}
}
}
}
//Kernel function to rasterize points
// Point rendering: plots each transformed vertex as a single white fragment.
// Coordinates are screen-centered; no depth test is performed here.
__global__
void kernRasterizePoints(int numVertices, int w, int h, Fragment *fragments, VertexOut * vertices, Camera cam, Light light1, Light light2)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numVertices)
{
glm::ivec2 point(vertices[index].pos.x, vertices[index].pos.y);
//If point within bounds
if(point.x > -w*0.5
&& point.x < w*0.5f
&& point.y > -h*0.5f
&& point.y < h*0.5f )
{
//Color the corresponding fragment
fragments[int((point.x + w*0.5f) + (point.y + h*0.5f)*w)].color = glm::vec3(1.0f);
}
}
}
//Kernel function to rasterize lines
// Line rendering: rasterizes one edge per thread by stepping along the major
// axis (y when |slope| > 1, else x) and solving for the minor coordinate.
// Endpoints are clamped to the screen bounds first; fragments are painted
// white with no depth test.
// NOTE(review): a perfectly vertical edge makes m infinite; the m > 1 branch
// then yields i = v1.x via division by inf, which appears to work but relies
// on IEEE float semantics — confirm.
__global__
void kernRasterizeLines(int numVertices, int w, int h, Fragment *fragments, Edge *edge, Camera cam, Light light1, Light light2)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numVertices)
{
Edge &e = edge[index];
glm::vec2 v1(e.v1.x, e.v1.y);
glm::vec2 v2(e.v2.x, e.v2.y);
//Clamp edge to screen boundaries
v1.x = glm::clamp(v1.x, -(float)w*0.5f, (float)w*0.5f);
v1.y = glm::clamp(v1.y, -(float)h*0.5f, (float)h*0.5f);
v2.x = glm::clamp(v2.x, -(float)w*0.5f, (float)w*0.5f);
v2.y = glm::clamp(v2.y, -(float)h*0.5f, (float)h*0.5f);
float m = (v2.y - v1.y) / (v2.x - v1.x);
int inc;
if(m > 1)
{
// Steep edge: iterate over y, derive x.
if(v1.y > v2.y)
{
inc = -1;
}
else
{
inc = 1;
}
int i, j;
for(j=v1.y; j!=(int)v2.y; j += inc)
{
i = ((float)j - v1.y) / m + v1.x;
fragments[int((i + w*0.5f) + (j + h*0.5f)*w)].color = glm::vec3(1.0f);
}
}
else
{
// Shallow edge: iterate over x, derive y.
if(v1.x > v2.x)
{
inc = -1;
}
else
{
inc = 1;
}
int i, j;
for(i=v1.x; i!=(int)v2.x; i += inc)
{
j = m * ((float)i - v1.x) + v1.y;
fragments[int((i + w*0.5f) + (j + h*0.5f)*w)].color = glm::vec3(1.0f);
}
}
}
}
// Post-process blur over each triangle's screen bounding box: replaces every
// covered pixel with a 3x3 weighted average of fragment colors (1/4 center,
// 1/8 edge neighbors, 1/16 corners). The call site in rasterize() is
// currently commented out, so this kernel is unused.
// NOTE(review): reads and writes the same color buffer in place, so
// neighboring pixels blend already-blurred values — confirm acceptable.
__global__
void kernAntiAliasing(int numTriangles, int w, int h, Fragment * fragment, Triangle * triangles)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < numTriangles)
{
Triangle &t = triangles[index];
glm::vec3 tri[3];
tri[0] = t.vOut[0].pos;
tri[1] = t.vOut[1].pos;
tri[2] = t.vOut[2].pos;
AABB aabb = getAABBForTriangle(tri);
glm::ivec3 min, max;
//Attempted clipping
min.x = glm::clamp(aabb.min.x, -(float)w*0.5f+2, (float)w*0.5f-2);
min.y = glm::clamp(aabb.min.y, -(float)h*0.5f+2, (float)h*0.5f-2);
max.x = glm::clamp(aabb.max.x, -(float)w*0.5f+2, (float)w*0.5f-2);
max.y = glm::clamp(aabb.max.y, -(float)h*0.5f+2, (float)h*0.5f-2);
for(int i=min.x-1; i<=max.x+1; ++i)
{
for(int j=min.y-1; j<=max.y+1; ++j)
{
int fragIndex = int((i+w*0.5) + (j + h*0.5)*w);
int fragIndex0 = int((i+1 + w*0.5) + (j+1 + h*0.5)*w);
int fragIndex1 = int((i+1 + w*0.5) + (j-1 + h*0.5)*w);
int fragIndex2 = int((i-1 + w*0.5) + (j+1 + h*0.5)*w);
int fragIndex3 = int((i-1 + w*0.5) + (j-1 + h*0.5)*w);
int fragIndex4 = int((i+1 + w*0.5) + (j + h*0.5)*w);
int fragIndex5 = int((i-1 + w*0.5) + (j + h*0.5)*w);
int fragIndex6 = int((i + w*0.5) + (j+1 + h*0.5)*w);
int fragIndex7 = int((i + w*0.5) + (j-1 + h*0.5)*w);
fragment[fragIndex].color = 0.25f * fragment[fragIndex].color +
0.125f * (fragment[fragIndex4].color+
fragment[fragIndex5].color+
fragment[fragIndex6].color+
fragment[fragIndex7].color) +
0.0625f* (fragment[fragIndex0].color+
fragment[fragIndex1].color+
fragment[fragIndex2].color+
fragment[fragIndex3].color);
}
}
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
// Allocates (or reallocates) the per-pixel fragment/depth buffer and the
// framebuffer for a w x h target. Safe to call repeatedly: any previous
// allocations are freed first.
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_depthbuffer);
hipMalloc(&dev_depthbuffer, width * height * sizeof(Fragment));
hipMemset(dev_depthbuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
// Uploads the mesh to the GPU: index buffer, interleaved vertex records, and
// scratch buffers for assembled primitives, shaded vertices, and edges.
// Safe to call repeatedly; previous device allocations are freed first.
void rasterizeSetBuffers(
    int _bufIdxSize, int *bufIdx,
    int _vertCount, float *bufPos, float *bufNor, float *bufCol) {
    bufIdxSize = _bufIdxSize;
    vertCount = _vertCount;

    // Index buffer.
    hipFree(dev_bufIdx);
    hipMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
    hipMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), hipMemcpyHostToDevice);

    // Interleave the separate position/normal/color streams into VertexIn
    // records on the host, then upload them.
    VertexIn *bufVertex = new VertexIn[_vertCount];
    for (int i = 0; i < vertCount; i++) {
        int j = i * 3;
        bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
        bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
        bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
    }
    hipFree(dev_bufVertex);
    hipMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
    hipMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), hipMemcpyHostToDevice);
    delete[] bufVertex;  // fix: host staging array was previously leaked

    // One triangle per three vertices.
    hipFree(dev_primitives);
    hipMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle));
    hipMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle));

    hipFree(dev_outVertex);
    hipMalloc((void**)&dev_outVertex, vertCount * sizeof(VertexOut));

    hipFree(dev_edges);
    hipMalloc((void**)&dev_edges, vertCount * sizeof(Edge));
    checkCUDAError("rasterizeSetBuffers");
}
/**
* Perform rasterization.
*/
// Top-level rasterization pass: clears the fragment buffer, draws the debug
// axes, runs the pipeline selected by scene->renderMode (TRIANGLES with
// optional back-face culling and timing, POINTS, or LINES), then copies the
// result into the OpenGL PBO and a host-side image buffer.
// scene->run acts as a dirty flag: the pipeline reruns only when it is set.
void rasterize(uchar4 *pbo) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
if(scene->run)
{
int numThreads = 128;
int numBlocks;
int numTriangles = vertCount/3;
scene->run = false;
Camera &cam = scene->cam;
Light &light1 = scene->light1;
Light &light2 = scene->light2;
//Clear the color and depth buffers
hipLaunchKernelGGL(( kernClearFragmentBuffer), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer);
//Drawing axis
hipLaunchKernelGGL(( kernDrawAxis), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer);
switch (scene->renderMode)
{
case TRIANGLES:
{
// Events time each stage; results print only when SHOW_TIMING is set.
hipEvent_t startAll, stopAll;
hipEventCreate(&startAll);
hipEventCreate(&stopAll);
hipEventRecord(startAll);
Triangle *dev_primitivesEnd;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//------------------------------Vertex Shading------------------------------------
hipEventRecord(start);
//Do vertex shading
numBlocks = (vertCount + numThreads -1)/numThreads;
hipLaunchKernelGGL(( kernVertexShader), dim3(numBlocks), dim3(numThreads), 0, 0, vertCount, width, height, dev_bufVertex, dev_outVertex, cam);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
if(SHOW_TIMING)
std::cout<<"Time Vertex Shading: "<<milliseconds<<std::endl;
//--------------------------------------------------------------------------------------------
//-----------------------------Primitive Assembly-------------------------------------------
hipEventRecord(start);
//Do primitive (triangle) assembly
numBlocks = (numTriangles + numThreads -1)/numThreads;
hipLaunchKernelGGL(( kernPrimitiveAssembly), dim3(numBlocks), dim3(numThreads), 0, 0, numTriangles, dev_outVertex, dev_bufVertex, dev_primitives, dev_bufIdx, cam.dir, scene->backFaceCulling);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
if(SHOW_TIMING)
std::cout<<"Time Primitive Assembly: "<<milliseconds<<std::endl;
//--------------------------------------------------------------------------------------------
if(scene->backFaceCulling)
{
//Back face culling
// Stream-compact away triangles whose keep flag was cleared.
dev_primitivesEnd = dev_primitives + numTriangles;
dev_primitivesEnd = thrust::remove_if(thrust::device, dev_primitives, dev_primitivesEnd, keep());
numTriangles = dev_primitivesEnd - dev_primitives;
}
//--------------------------------Rasterization---------------------------------------
hipEventRecord(start);
//Rasterization per triangle
numBlocks = (numTriangles + numThreads -1)/numThreads;
hipLaunchKernelGGL(( kernRasterizeTraingles), dim3(numBlocks), dim3(numThreads), 0, 0, width, height, dev_depthbuffer, dev_primitives, numTriangles, cam, scene->antiAliasing);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
if(SHOW_TIMING)
std::cout<<"Time Rasterize Triangle: "<<milliseconds<<std::endl;
//--------------------------------------------------------------------------------------------
//------------------------------------Fragment Shading----------------------------------------
hipEventRecord(start);
hipLaunchKernelGGL(( kernFragmentShader), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer, light1, light2, scene->antiAliasing);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
if(SHOW_TIMING)
std::cout<<"Time Fragment Shader: "<<milliseconds<<std::endl;
//--------------------------------------------------------------------------------------------
// if(scene->antiAliasing)
// {
// kernAntiAliasing<<<numBlocks, numThreads>>>(numTriangles, width, height, dev_depthbuffer, dev_primitives);
// }
hipEventRecord(stopAll);
hipEventSynchronize(stopAll);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, startAll, stopAll);
if(SHOW_TIMING)
std::cout<<"Time All: "<<milliseconds<<std::endl;
std::cout<<std::endl;
break;
}
case POINTS:
{
//Do vertex shading
numBlocks = (vertCount + numThreads -1)/numThreads;
hipLaunchKernelGGL(( kernVertexShader), dim3(numBlocks), dim3(numThreads), 0, 0, vertCount, width, height, dev_bufVertex, dev_outVertex, cam);
//Rasterization per vertex
hipLaunchKernelGGL(( kernRasterizePoints), dim3(numBlocks), dim3(numThreads), 0, 0, vertCount, width, height, dev_depthbuffer, dev_outVertex, cam, light1, light2);
break;
}
case LINES:
{
//Do vertex shading
numBlocks = (vertCount + numThreads -1)/numThreads;
hipLaunchKernelGGL(( kernVertexShader), dim3(numBlocks), dim3(numThreads), 0, 0, vertCount, width, height, dev_bufVertex, dev_outVertex, cam);
//Do primitive (edge) assembly
numBlocks = (numTriangles + numThreads -1)/numThreads;
hipLaunchKernelGGL(( kernEdgeAssembly), dim3(numBlocks), dim3(numThreads), 0, 0, numTriangles, dev_outVertex, dev_edges, dev_bufIdx);
//Rasterization per edge
numBlocks = (vertCount + numThreads -1)/numThreads;
hipLaunchKernelGGL(( kernRasterizeLines), dim3(numBlocks), dim3(numThreads), 0, 0, vertCount, width, height, dev_depthbuffer, dev_edges, cam, light1, light2);
break;
}
}
}
// Copy depthbuffer colors into framebuffer
hipLaunchKernelGGL(( render), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer, dev_framebuffer);
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
//Save image data to write to file
hipMemcpy(scene->imageColor, dev_framebuffer, width*height*sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("rasterize");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
// Frees all device buffers. Each pointer is nulled after freeing so a
// subsequent rasterize*() call cannot double-free.
void rasterizeFree() {
hipFree(dev_bufIdx);
dev_bufIdx = NULL;
hipFree(dev_bufVertex);
dev_bufVertex = NULL;
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_depthbuffer);
dev_depthbuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_outVertex);
dev_outVertex = NULL;
hipFree(dev_edges);
dev_edges = NULL;
checkCUDAError("rasterizeFree");
}
| b72772505e744be1a4b743b6ff1e4bc4744aa301.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
//#include "sceneStructs.h"
#include "Scene.h"
extern Scene *scene;
#define SHOW_TIMING 0
// Predicate functor for thrust::remove_if: returns true for triangles whose
// keep flag was cleared (back-facing), i.e. the ones to remove.
struct keep
{
__host__ __device__ bool operator()(const Triangle t)
{
return (!t.keep);
}
};
static int width = 0;
static int height = 0;
static int *dev_bufIdx = NULL;
static VertexIn *dev_bufVertex = NULL;
static Triangle *dev_primitives = NULL;
static Edge *dev_edges = NULL;
static Fragment *dev_depthbuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
static glm::mat4 matrix;
static glm::vec3 camDir;
static Light light;
static Camera cam;
//Things added
static VertexOut *dev_outVertex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
// Converts each framebuffer color from [0,1] floats to 8-bit RGB and writes
// it into the OpenGL pixel buffer object (alpha is set to 0).
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// Writes fragment colors to the framebuffer
// Copies each fragment's shaded color from the depth buffer into the
// framebuffer, one thread per pixel (2D launch, bounds-checked).
__global__
void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
framebuffer[index] = depthbuffer[index].color;
}
}
//Kernel function to figure out vertex in transformes space and NDC
// Vertex shader: stores each vertex's world-space position (transformedPos),
// projects it through the combined camera matrix with perspective divide, and
// scales the result into device coordinates (pos). Normals are transformed by
// the inverse-transpose model matrix. One thread per vertex, 1D launch.
__global__
void kernVertexShader(int numVertices, int w, int h, VertexIn * inVertex, VertexOut *outVertex, Camera cam)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numVertices)
{
glm::vec4 outPoint = glm::vec4(inVertex[index].pos.x, inVertex[index].pos.y, inVertex[index].pos.z, 1.0f);
// World-space position, used later for lighting in the fragment shader.
outVertex[index].transformedPos = multiplyMV(cam.model, outPoint);
outPoint = cam.cameraMatrix * outPoint;
// Perspective divide (skipped only when w is exactly zero).
if(outPoint.w != 0)
outPoint /= outPoint.w;
//In NDC
// outVertex[index].pos = glm::vec3(outPoint);
//In Device Coordinates
outVertex[index].pos.x = outPoint.x * w;
outVertex[index].pos.y = outPoint.y * h;
outVertex[index].pos.z = outPoint.z;
outVertex[index].nor = multiplyMV(cam.inverseTransposeModel, glm::vec4(inVertex[index].nor, 0.0f));
// outVertex[index].col = glm::vec3(0,0,1);
// outVertex[index].nor = inVertex[index].nor;
// printf ("InVertex : %f %f \nOutVertex : %f %f \n\n", inVertex[index].pos.x, inVertex[index].pos.y, outVertex[index].pos.x, outVertex[index].pos.y);
}
}
//Kernel function to assemble triangles
// Primitive assembly: gathers the three shaded (vOut) and raw (vIn) vertices
// of each triangle via the index buffer. With backFaceCulling enabled,
// triangles whose summed vertex normal points along camDir are only marked
// t.keep = false; actual removal happens later via thrust::remove_if.
// NOTE(review): the face normal is summed from outVertex[k_3..k_3+2]
// (sequential vertices) while the gathered vertices use indices[] — these
// only agree when the index buffer is the identity; confirm intended.
__global__
void kernPrimitiveAssembly(int numTriangles, VertexOut *outVertex, VertexIn *inVertex, Triangle *triangles, int* indices, glm::vec3 camDir, bool backFaceCulling)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numTriangles)
{
int k_3 = 3 * index;
Triangle &t = triangles[index];
//Find the triangle normal
glm::vec3 triNor = (outVertex[k_3].nor + outVertex[k_3+1].nor + outVertex[k_3+2].nor);
// printf ("Tri Normal : %f %f %f\n", triNor.x, triNor.y, triNor.z);
// printf ("Cam Dir : %f %f %f\n", camDir.x, camDir.y, camDir.z);
if(backFaceCulling && glm::dot(triNor, camDir) > 0.0f)
{
//Triangle facing away from the camera
// Mark for deletion
t.keep = false;
}
else
{
//Else save it
t.keep = true;
t.vOut[0] = outVertex[indices[k_3]];
t.vOut[1] = outVertex[indices[k_3+1]];
t.vOut[2] = outVertex[indices[k_3+2]];
t.vIn[0] = inVertex[indices[k_3]];
t.vIn[1] = inVertex[indices[k_3+1]];
t.vIn[2] = inVertex[indices[k_3+2]];
}
}
}
//Kernel function to assemble edges
// Edge assembly for wireframe rendering: each triangle writes three edges
// (v0-v1, v1-v2, v2-v0) into the slots named by indices[k_3..k_3+2].
// NOTE(review): endpoints are read from sequential outVertex[k_3..] while
// destination slots use indices[]; with a shared index buffer, later
// triangles overwrite earlier edges — confirm intended.
__global__
void kernEdgeAssembly(int numTriangles, VertexOut *outVertex, Edge *edge, int* indices)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numTriangles)
{
int k_3 = 3 * index;
edge[indices[k_3]].v1 = outVertex[k_3].pos;
edge[indices[k_3]].v2 = outVertex[k_3+1].pos;
edge[indices[k_3+1]].v1 = outVertex[k_3+1].pos;
edge[indices[k_3+1]].v2 = outVertex[k_3+2].pos;
edge[indices[k_3+2]].v1 = outVertex[k_3+2].pos;
edge[indices[k_3+2]].v2 = outVertex[k_3].pos;
}
}
//Kernel function to draw axis
// Overlays debug guides on the fragment buffer: a green vertical line at the
// horizontal center, a red horizontal line at the vertical center, and a
// white one-pixel border around the screen. One thread per pixel (2D launch).
__global__
void kernDrawAxis(int w, int h, Fragment *fragments)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < w && y < h)
    {
        int index = x + (y * w);
        if((x - w*0.5f) == 0)
        {
            // Vertical axis in green.
            fragments[index].color = glm::vec3(0, 1, 0);
        }
        else if((y - h*0.5f) == 0)
        {
            // Horizontal axis in red.
            fragments[index].color = glm::vec3(1, 0, 0);
        }
        else if(x == 0 || x == w-1)
        {
            fragments[index].color = glm::vec3(1);
        }
        // Fix: was `y == h`, which is unreachable under the y < h guard,
        // so the bottom border row was never drawn (left/right/top were).
        else if(y == 0 || y == h-1)
        {
            fragments[index].color = glm::vec3(1);
        }
    }
}
//Kernel function to clear the depth and color buffer
// Resets every per-pixel fragment before a frame: color black, and all four
// subsample slots cleared with their depths set to INT_MAX (farthest) so the
// subsequent atomicMin-based depth test accepts the first covering triangle.
__global__
void kernClearFragmentBuffer(int w, int h, Fragment *fragments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
Fragment &f = fragments[index];
glm::vec3 ZERO(0.0f);
f.color = ZERO;
f.depth[0] = INT_MAX;
f.depth[1] = INT_MAX;
f.depth[2] = INT_MAX;
f.depth[3] = INT_MAX;
f.c[0] = ZERO;
f.c[1] = ZERO;
f.c[2] = ZERO;
f.c[3] = ZERO;
f.primitiveCol[0] = ZERO;
f.primitiveCol[1] = ZERO;
f.primitiveCol[2] = ZERO;
f.primitiveCol[3] = ZERO;
f.primitiveNor[0] = ZERO;
f.primitiveNor[1] = ZERO;
f.primitiveNor[2] = ZERO;
f.primitiveNor[3] = ZERO;
f.primitivePos[0] = ZERO;
f.primitivePos[1] = ZERO;
f.primitivePos[2] = ZERO;
f.primitivePos[3] = ZERO;
}
}
//Kernel function to rasterize the triangle
// Rasterizes one triangle per thread: walks the triangle's (clamped) screen
// bounding box, tests coverage with barycentric coordinates — either one
// pixel-center sample or four 0.25-offset samples when antiAliasing is on —
// and records interpolated normal/position/color per winning sample.
// Device coordinates are centered on the screen, hence the +w*0.5 / +h*0.5
// shifts when forming the fragment index.
__global__
void kernRasterizeTraingles(int w, int h, Fragment *fragments, Triangle *triangles, int numTriangles, Camera cam, bool antiAliasing)
{
//Rasterization per triangle
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numTriangles)
{
Triangle &t = triangles[index];
glm::vec3 tri[3];
tri[0] = t.vOut[0].pos;
tri[1] = t.vOut[1].pos;
tri[2] = t.vOut[2].pos;
AABB aabb = getAABBForTriangle(tri);
glm::ivec3 min, max;
//Attempted clipping
min.x = glm::clamp(aabb.min.x, -(float)w*0.5f+1, (float)w*0.5f-1);
min.y = glm::clamp(aabb.min.y, -(float)h*0.5f+1, (float)h*0.5f-1);
max.x = glm::clamp(aabb.max.x, -(float)w*0.5f+1, (float)w*0.5f-1);
max.y = glm::clamp(aabb.max.y, -(float)h*0.5f+1, (float)h*0.5f-1);
for(int i=min.x-1; i<=max.x+1; ++i)
{
for(int j=min.y-1; j<=max.y+1; ++j)
{
glm::vec2 point[4];
int iterCount;
if(antiAliasing)
{
// Four rotated-grid subsamples per pixel for 4x supersampling.
point[0] = glm::vec2(float(i) - 0.25f, float(j) - 0.25f);
point[1] = glm::vec2(float(i) - 0.25f, float(j) + 0.25f);
point[2] = glm::vec2(float(i) + 0.25f, float(j) - 0.25f);
point[3] = glm::vec2(float(i) + 0.25f, float(j) + 0.25f);
iterCount = 4;
}
else
{
point[0] = glm::ivec2(i,j);
iterCount = 1;
}
for(int k=0; k<iterCount; ++k)
{
glm::vec3 barycentric = calculateBarycentricCoordinate(tri, point[k]);
if(isBarycentricCoordInBounds(barycentric))
{
glm::vec3 triIn[3];
VertexIn tvIn[3] = {t.vIn[0], t.vIn[1], t.vIn[2]};
triIn[0] = t.vOut[0].transformedPos;
triIn[1] = t.vOut[1].transformedPos;
triIn[2] = t.vOut[2].transformedPos;
int fragIndex = int((i+w*0.5) + (j + h*0.5)*w);
// Fixed-point depth (scaled by 10000) so atomicMin can arbitrate.
int depth = getZAtCoordinate(barycentric, triIn) * 10000;
//Depth testing
// NOTE(review): the depth slot is claimed with atomicMin, but the
// attribute writes below are not atomic — two threads that both pass
// the compare can interleave their writes; confirm this is acceptable.
if(depth < fragments[fragIndex].depth[k])
{
atomicMin(&fragments[fragIndex].depth[k], depth);
Fragment &f = fragments[fragIndex];
//Fragment shading data
f.primitiveNor[k] = barycentric.x * t.vOut[0].nor +
barycentric.y * t.vOut[1].nor +
barycentric.z * t.vOut[2].nor;
f.primitivePos[k] = barycentric.x * t.vOut[0].transformedPos +
barycentric.y * t.vOut[1].transformedPos +
barycentric.z * t.vOut[2].transformedPos;
f.primitiveCol[k] = barycentric.x * tvIn[0].col +
barycentric.y * tvIn[1].col +
barycentric.z * tvIn[2].col;
}
}
}
}
}
}
}
// Fragment shader: lights each covered pixel with the two scene lights.
// A pixel is covered when any of its four depth slots was written (< INT_MAX).
// With antiAliasing, the four subsample shading results are averaged.
__global__
void kernFragmentShader(int w, int h, Fragment * fragment, Light light1, Light light2, bool antiAliasing)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int fragIndex = x + (y * w);
if (x < w && y < h)
{
Fragment & f = fragment[fragIndex];
if((f.depth[0] < INT_MAX) || (f.depth[1] < INT_MAX) || (f.depth[2] < INT_MAX) || (f.depth[3] < INT_MAX))
{
if(!antiAliasing)
{
f.color = calculateFragColor(f.primitiveNor[0], f.primitivePos[0], f.primitiveCol[0], light1, light2);
}
else
{
f.color = 0.25f * (calculateFragColor(f.primitiveNor[0], f.primitivePos[0], f.primitiveCol[0], light1, light2) +
calculateFragColor(f.primitiveNor[1], f.primitivePos[1], f.primitiveCol[1], light1, light2) +
calculateFragColor(f.primitiveNor[2], f.primitivePos[2], f.primitiveCol[2], light1, light2) +
calculateFragColor(f.primitiveNor[3], f.primitivePos[3], f.primitiveCol[3], light1, light2)
);
}
}
}
}
//Kernel function to rasterize points
// Point rendering: plots each transformed vertex as a single white fragment.
// Coordinates are screen-centered; no depth test is performed here.
__global__
void kernRasterizePoints(int numVertices, int w, int h, Fragment *fragments, VertexOut * vertices, Camera cam, Light light1, Light light2)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numVertices)
{
glm::ivec2 point(vertices[index].pos.x, vertices[index].pos.y);
//If point within bounds
if(point.x > -w*0.5
&& point.x < w*0.5f
&& point.y > -h*0.5f
&& point.y < h*0.5f )
{
//Color the corresponding fragment
fragments[int((point.x + w*0.5f) + (point.y + h*0.5f)*w)].color = glm::vec3(1.0f);
}
}
}
//Kernel function to rasterize lines
// Line rendering: rasterizes one edge per thread by stepping along the major
// axis (y when |slope| > 1, else x) and solving for the minor coordinate.
// Endpoints are clamped to the screen bounds first; fragments are painted
// white with no depth test.
// NOTE(review): a perfectly vertical edge makes m infinite; the m > 1 branch
// then yields i = v1.x via division by inf, which appears to work but relies
// on IEEE float semantics — confirm.
__global__
void kernRasterizeLines(int numVertices, int w, int h, Fragment *fragments, Edge *edge, Camera cam, Light light1, Light light2)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < numVertices)
{
Edge &e = edge[index];
glm::vec2 v1(e.v1.x, e.v1.y);
glm::vec2 v2(e.v2.x, e.v2.y);
//Clamp edge to screen boundaries
v1.x = glm::clamp(v1.x, -(float)w*0.5f, (float)w*0.5f);
v1.y = glm::clamp(v1.y, -(float)h*0.5f, (float)h*0.5f);
v2.x = glm::clamp(v2.x, -(float)w*0.5f, (float)w*0.5f);
v2.y = glm::clamp(v2.y, -(float)h*0.5f, (float)h*0.5f);
float m = (v2.y - v1.y) / (v2.x - v1.x);
int inc;
if(m > 1)
{
// Steep edge: iterate over y, derive x.
if(v1.y > v2.y)
{
inc = -1;
}
else
{
inc = 1;
}
int i, j;
for(j=v1.y; j!=(int)v2.y; j += inc)
{
i = ((float)j - v1.y) / m + v1.x;
fragments[int((i + w*0.5f) + (j + h*0.5f)*w)].color = glm::vec3(1.0f);
}
}
else
{
// Shallow edge: iterate over x, derive y.
if(v1.x > v2.x)
{
inc = -1;
}
else
{
inc = 1;
}
int i, j;
for(i=v1.x; i!=(int)v2.x; i += inc)
{
j = m * ((float)i - v1.x) + v1.y;
fragments[int((i + w*0.5f) + (j + h*0.5f)*w)].color = glm::vec3(1.0f);
}
}
}
}
// Post-process blur over each triangle's screen bounding box: replaces every
// covered pixel with a 3x3 weighted average of fragment colors (1/4 center,
// 1/8 edge neighbors, 1/16 corners). The call site in rasterize() is
// currently commented out, so this kernel is unused.
// NOTE(review): reads and writes the same color buffer in place, so
// neighboring pixels blend already-blurred values — confirm acceptable.
__global__
void kernAntiAliasing(int numTriangles, int w, int h, Fragment * fragment, Triangle * triangles)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < numTriangles)
{
Triangle &t = triangles[index];
glm::vec3 tri[3];
tri[0] = t.vOut[0].pos;
tri[1] = t.vOut[1].pos;
tri[2] = t.vOut[2].pos;
AABB aabb = getAABBForTriangle(tri);
glm::ivec3 min, max;
//Attempted clipping
min.x = glm::clamp(aabb.min.x, -(float)w*0.5f+2, (float)w*0.5f-2);
min.y = glm::clamp(aabb.min.y, -(float)h*0.5f+2, (float)h*0.5f-2);
max.x = glm::clamp(aabb.max.x, -(float)w*0.5f+2, (float)w*0.5f-2);
max.y = glm::clamp(aabb.max.y, -(float)h*0.5f+2, (float)h*0.5f-2);
for(int i=min.x-1; i<=max.x+1; ++i)
{
for(int j=min.y-1; j<=max.y+1; ++j)
{
int fragIndex = int((i+w*0.5) + (j + h*0.5)*w);
int fragIndex0 = int((i+1 + w*0.5) + (j+1 + h*0.5)*w);
int fragIndex1 = int((i+1 + w*0.5) + (j-1 + h*0.5)*w);
int fragIndex2 = int((i-1 + w*0.5) + (j+1 + h*0.5)*w);
int fragIndex3 = int((i-1 + w*0.5) + (j-1 + h*0.5)*w);
int fragIndex4 = int((i+1 + w*0.5) + (j + h*0.5)*w);
int fragIndex5 = int((i-1 + w*0.5) + (j + h*0.5)*w);
int fragIndex6 = int((i + w*0.5) + (j+1 + h*0.5)*w);
int fragIndex7 = int((i + w*0.5) + (j-1 + h*0.5)*w);
fragment[fragIndex].color = 0.25f * fragment[fragIndex].color +
0.125f * (fragment[fragIndex4].color+
fragment[fragIndex5].color+
fragment[fragIndex6].color+
fragment[fragIndex7].color) +
0.0625f* (fragment[fragIndex0].color+
fragment[fragIndex1].color+
fragment[fragIndex2].color+
fragment[fragIndex3].color);
}
}
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
/**
 * Called once at the beginning of the program to allocate memory.
 * (Re)allocates and zeroes the per-pixel depth and frame buffers for a
 * w x h render target; any previous allocations are released first.
 */
void rasterizeInit(int w, int h) {
    width = w;
    height = h;

    const int pixelCount = w * h;
    const size_t depthBytes = pixelCount * sizeof(Fragment);
    const size_t frameBytes = pixelCount * sizeof(glm::vec3);

    cudaFree(dev_depthbuffer);
    cudaMalloc(&dev_depthbuffer, depthBytes);
    cudaMemset(dev_depthbuffer, 0, depthBytes);

    cudaFree(dev_framebuffer);
    cudaMalloc(&dev_framebuffer, frameBytes);
    cudaMemset(dev_framebuffer, 0, frameBytes);

    checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
/**
 * Set all of the buffers necessary for rasterization.
 *
 * Uploads the index buffer, interleaves the separate position/normal/color
 * float streams into an AoS VertexIn array on the device, and (re)allocates
 * the primitive, output-vertex and edge buffers sized from the vertex count.
 *
 * @param _bufIdxSize number of indices in bufIdx
 * @param bufIdx      host index buffer
 * @param _vertCount  number of vertices (3 floats per vertex per stream)
 * @param bufPos      host positions stream
 * @param bufNor      host normals stream
 * @param bufCol      host colors stream
 */
void rasterizeSetBuffers(
        int _bufIdxSize, int *bufIdx,
        int _vertCount, float *bufPos, float *bufNor, float *bufCol) {
    bufIdxSize = _bufIdxSize;
    vertCount = _vertCount;

    cudaFree(dev_bufIdx);
    cudaMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
    cudaMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), cudaMemcpyHostToDevice);

    // Interleave the attribute streams into AoS form for a single upload.
    VertexIn *bufVertex = new VertexIn[_vertCount];
    for (int i = 0; i < vertCount; i++) {
        int j = i * 3;
        bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
        bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
        bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
    }
    cudaFree(dev_bufVertex);
    cudaMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
    cudaMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), cudaMemcpyHostToDevice);
    delete[] bufVertex;  // fix: the staging buffer was leaked on every call

    cudaFree(dev_primitives);
    cudaMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle));
    cudaMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle));

    cudaFree(dev_outVertex);
    cudaMalloc((void**)&dev_outVertex, vertCount * sizeof(VertexOut));

    cudaFree(dev_edges);
    cudaMalloc((void**)&dev_edges, vertCount * sizeof(Edge));

    checkCUDAError("rasterizeSetBuffers");
}
/**
* Perform rasterization.
*/
/**
 * Perform rasterization.
 *
 * When the scene requests a redraw (scene->run), clears the per-pixel
 * buffers and runs the pipeline selected by scene->renderMode
 * (TRIANGLES / POINTS / LINES); afterwards always copies the depth buffer
 * colors into the framebuffer, into the OpenGL PBO, and back to the host
 * image for file output.
 *
 * @param pbo OpenGL pixel buffer object mapped for writing the preview image
 */
void rasterize(uchar4 *pbo) {
    int sideLength2d = 8;
    dim3 blockSize2d(sideLength2d, sideLength2d);
    dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
                      (height - 1) / blockSize2d.y + 1);
    if (scene->run) {
        int numThreads = 128;
        int numBlocks;
        int numTriangles = vertCount / 3;
        scene->run = false;
        Camera &cam = scene->cam;
        Light &light1 = scene->light1;
        Light &light2 = scene->light2;
        // Clear the color and depth buffers
        kernClearFragmentBuffer<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer);
        // Drawing axis
        kernDrawAxis<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer);
        switch (scene->renderMode) {
        case TRIANGLES: {
            cudaEvent_t startAll, stopAll;
            cudaEventCreate(&startAll);
            cudaEventCreate(&stopAll);
            cudaEventRecord(startAll);
            Triangle *dev_primitivesEnd;
            cudaEvent_t start, stop;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            //------------------------------Vertex Shading------------------------------------
            cudaEventRecord(start);
            numBlocks = (vertCount + numThreads - 1) / numThreads;
            kernVertexShader<<<numBlocks, numThreads>>>(vertCount, width, height, dev_bufVertex, dev_outVertex, cam);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            float milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            if (SHOW_TIMING)
                std::cout << "Time Vertex Shading: " << milliseconds << std::endl;
            //-----------------------------Primitive Assembly---------------------------------
            cudaEventRecord(start);
            numBlocks = (numTriangles + numThreads - 1) / numThreads;
            kernPrimitiveAssembly<<<numBlocks, numThreads>>>(numTriangles, dev_outVertex, dev_bufVertex, dev_primitives, dev_bufIdx, cam.dir, scene->backFaceCulling);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            if (SHOW_TIMING)
                std::cout << "Time Primitive Assembly: " << milliseconds << std::endl;
            if (scene->backFaceCulling) {
                // Compact away triangles flagged by back-face culling.
                dev_primitivesEnd = dev_primitives + numTriangles;
                dev_primitivesEnd = thrust::remove_if(thrust::device, dev_primitives, dev_primitivesEnd, keep());
                numTriangles = dev_primitivesEnd - dev_primitives;
            }
            //--------------------------------Rasterization-----------------------------------
            cudaEventRecord(start);
            numBlocks = (numTriangles + numThreads - 1) / numThreads;
            kernRasterizeTraingles<<<numBlocks, numThreads>>>(width, height, dev_depthbuffer, dev_primitives, numTriangles, cam, scene->antiAliasing);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            if (SHOW_TIMING)
                std::cout << "Time Rasterize Triangle: " << milliseconds << std::endl;
            //------------------------------------Fragment Shading----------------------------
            cudaEventRecord(start);
            kernFragmentShader<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer, light1, light2, scene->antiAliasing);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            if (SHOW_TIMING)
                std::cout << "Time Fragment Shader: " << milliseconds << std::endl;
            // if(scene->antiAliasing)
            // {
            // kernAntiAliasing<<<numBlocks, numThreads>>>(numTriangles, width, height, dev_depthbuffer, dev_primitives);
            // }
            cudaEventRecord(stopAll);
            cudaEventSynchronize(stopAll);
            milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, startAll, stopAll);
            if (SHOW_TIMING)
                std::cout << "Time All: " << milliseconds << std::endl;
            std::cout << std::endl;
            // fix: the four timing events were created on every frame but
            // never destroyed, leaking CUDA resources frame after frame.
            cudaEventDestroy(start);
            cudaEventDestroy(stop);
            cudaEventDestroy(startAll);
            cudaEventDestroy(stopAll);
            break;
        }
        case POINTS: {
            // Vertex shading followed by one-fragment-per-vertex rasterization.
            numBlocks = (vertCount + numThreads - 1) / numThreads;
            kernVertexShader<<<numBlocks, numThreads>>>(vertCount, width, height, dev_bufVertex, dev_outVertex, cam);
            kernRasterizePoints<<<numBlocks, numThreads>>>(vertCount, width, height, dev_depthbuffer, dev_outVertex, cam, light1, light2);
            break;
        }
        case LINES: {
            // Vertex shading, edge assembly, then per-edge rasterization.
            numBlocks = (vertCount + numThreads - 1) / numThreads;
            kernVertexShader<<<numBlocks, numThreads>>>(vertCount, width, height, dev_bufVertex, dev_outVertex, cam);
            numBlocks = (numTriangles + numThreads - 1) / numThreads;
            kernEdgeAssembly<<<numBlocks, numThreads>>>(numTriangles, dev_outVertex, dev_edges, dev_bufIdx);
            numBlocks = (vertCount + numThreads - 1) / numThreads;
            kernRasterizeLines<<<numBlocks, numThreads>>>(vertCount, width, height, dev_depthbuffer, dev_edges, cam, light1, light2);
            break;
        }
        }
    }
    // Copy depthbuffer colors into framebuffer
    render<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer, dev_framebuffer);
    // Copy framebuffer into OpenGL buffer for OpenGL previewing
    sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
    // Save image data to write to file
    cudaMemcpy(scene->imageColor, dev_framebuffer, width*height*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
    checkCUDAError("rasterize");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
/**
 * Called once at the end of the program to free CUDA memory.
 * Releases every device buffer owned by the rasterizer and nulls each
 * pointer so a stale handle can never be freed twice.
 */
void rasterizeFree() {
    // Geometry inputs
    cudaFree(dev_bufIdx);      dev_bufIdx = NULL;
    cudaFree(dev_bufVertex);   dev_bufVertex = NULL;
    // Assembled primitives
    cudaFree(dev_primitives);  dev_primitives = NULL;
    cudaFree(dev_outVertex);   dev_outVertex = NULL;
    cudaFree(dev_edges);       dev_edges = NULL;
    // Per-pixel buffers
    cudaFree(dev_depthbuffer); dev_depthbuffer = NULL;
    cudaFree(dev_framebuffer); dev_framebuffer = NULL;
    checkCUDAError("rasterizeFree");
}
|
4a6b9541eee7cbb471df7e36bcf0d92dd2a86106.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Name Removed
// Homework 1
// Color to Greyscale Conversion
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
// Converts one RGBA pixel to 8-bit grey.  Launch layout: the x dimension
// (blockIdx.x/threadIdx.x) walks rows and the y dimension walks columns,
// matching the pix_index computation below.
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
    // row/column for this thread
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    // fix: the grid is rounded up, so threads past the image edge must not
    // read/write (previously caused out-of-bounds accesses).
    if (idx >= numRows || idy >= numCols)
        return;
    // flat pixel index (row-major)
    int pix_index = (idx * numCols) + idy;
    const uchar4 input_val = rgbaImage[pix_index];
    // ITU-R BT.601 luma weights
    greyImage[pix_index] = input_val.x * .299f + input_val.y * .587f + input_val.z * .114f;
}
// Launches the greyscale kernel over the whole image with 16x16 tiles.
// fix: the original passed the fixed 16x16 as the *grid* and the tile count
// as the *block* size -- for images wider/taller than ~512 px that exceeds
// the 1024 threads-per-block limit and the launch fails; the two are now in
// the correct positions (block = 16x16 tile, grid = rounded-up tile count).
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
    const dim3 blockSize(16, 16, 1);
    const dim3 gridSize(((numRows + 15) / 16), ((numCols + 15) / 16), 1);
    hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
    hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 4a6b9541eee7cbb471df7e36bcf0d92dd2a86106.cu | //Name Removed
// Homework 1
// Color to Greyscale Conversion
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
// Converts one RGBA pixel to 8-bit grey.  Launch layout: the x dimension
// (blockIdx.x/threadIdx.x) walks rows and the y dimension walks columns,
// matching the pix_index computation below.
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
    // row/column for this thread
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    // fix: the grid is rounded up, so threads past the image edge must not
    // read/write (previously caused out-of-bounds accesses).
    if (idx >= numRows || idy >= numCols)
        return;
    // flat pixel index (row-major)
    int pix_index = (idx * numCols) + idy;
    const uchar4 input_val = rgbaImage[pix_index];
    // ITU-R BT.601 luma weights
    greyImage[pix_index] = input_val.x * .299f + input_val.y * .587f + input_val.z * .114f;
}
// Launches the greyscale kernel over the whole image with 16x16 tiles.
// fix: the original passed the fixed 16x16 as the *grid* and the tile count
// as the *block* size -- for images wider/taller than ~512 px that exceeds
// the 1024 threads-per-block limit and the launch fails; the two are now in
// the correct positions (block = 16x16 tile, grid = rounded-up tile count).
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
    const dim3 blockSize(16, 16, 1);
    const dim3 gridSize(((numRows + 15) / 16), ((numCols + 15) / 16), 1);
    rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
d3528028e5d31742552272cdc5ba4ac13c0065f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "math.h"
#include "stdlib.h"
#include "stdio.h"
#include "time.h"
#include "sys/time.h"
#include "hip/hip_runtime.h"
#include "image_template.h"
// Horizontal 1-D convolution of `image` (height x width, row-major) with
// `mask` (length mask_width), writing to out_image.  Each block stages its
// tile plus left/right aprons of floor(mask_width/2) pixels in dynamic
// shared memory; threadIdx.x/blockIdx.x walk rows, threadIdx.y/blockIdx.y
// walk columns.  Caller must provide
// blockDim.x * (blockDim.y + 2*floor(mask_width/2)) floats of shared memory.
// NOTE(review): sharedwidth is derived from blockDim.x but indexed by
// threadIdx.y, so square thread blocks (blockDim.x == blockDim.y) are
// assumed -- confirm before using non-square blocks.
// NOTE(review): there is no i<height / j<width guard; image dimensions must
// be exact multiples of the block size -- confirm with the launcher.
__global__
void convolve_hor(float *image, int width, int height, float *mask, int mask_width, float* out_image){
int i, j, k;
int sharedwidth = blockDim.x + 2 * floorf(mask_width/2);
float sum=0;
extern __shared__ float AShared[];
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
// Cooperative load: each thread puts its own pixel into the tile interior
// (shifted right by half the mask so the aprons fit on the left).
AShared[threadIdx.x * sharedwidth + (threadIdx.y + mask_width/2)] = image[i*width+j];
__syncthreads();
// Left apron: the last mask_width/2 threads of the row fetch the trailing
// columns of the previous block (zero-padded at the image's left edge).
if(threadIdx.y >= blockDim.y - (mask_width/2)){
if(blockIdx.y >= 1){
AShared[threadIdx.x*sharedwidth + threadIdx.y - blockDim.y + (mask_width/2)]
= image[i*width + (blockIdx.y-1) * blockDim.y + threadIdx.y];
}
else{
AShared[threadIdx.x * sharedwidth + threadIdx.y - blockDim.y + (mask_width/2)] = 0;
}
}
__syncthreads();
// Right apron: the first mask_width/2 threads fetch the leading columns of
// the next block (zero-padded at the image's right edge).
if(threadIdx.y < (mask_width/2)){
if(blockIdx.y < gridDim.y-1){
AShared[threadIdx.x*sharedwidth + mask_width/2 + blockDim.y + threadIdx.y]
= image[i * width + (blockIdx.y + 1)* blockDim.y + threadIdx.y];
}
else{
AShared[threadIdx.x * sharedwidth + mask_width/2 + blockDim.y + threadIdx.y] = 0;
}
}
__syncthreads();
// Convolve entirely out of shared memory.
for(k=0; k<mask_width; k++){
sum += AShared[threadIdx.x * sharedwidth + (threadIdx.y+k)]*mask[k];
}
// Write results
out_image[i*width+j] = sum;
__syncthreads();
}
// Vertical 1-D convolution of `image` (height x width, row-major) with
// `mask` (length mask_width), writing to out_image.  Mirror of convolve_hor:
// the aprons extend over rows instead of columns.  Caller must provide
// (blockDim.x + 2*floor(mask_width/2)) * blockDim.x floats of shared memory.
// NOTE(review): the shared row stride is blockDim.x but columns are indexed
// by threadIdx.y, so square thread blocks are assumed -- confirm before
// using non-square blocks.
// NOTE(review): no i<height / j<width guard; image dimensions must be exact
// multiples of the block size -- confirm with the launcher.
__global__
void convolve_ver(float *image, int width, int height, float *mask, int mask_width, float* out_image){
int i, j, k;
int sharedwidth = blockDim.x;
float sum=0;
extern __shared__ float AShared[];
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
// Cooperative load: own pixel into the tile interior, shifted down by half
// the mask so the top apron fits above it.
AShared[(threadIdx.x+mask_width/2) * sharedwidth + (threadIdx.y)] = image[i*width+j];
__syncthreads();
// Top apron: last mask_width/2 rows of threads fetch the trailing rows of
// the block above (zero-padded at the image's top edge).
if(threadIdx.x >= blockDim.x - (mask_width/2)){
if(blockIdx.x >= 1){
AShared[(threadIdx.x-blockDim.x+(mask_width/2))*sharedwidth + threadIdx.y]
= image[((blockIdx.x-1)*blockDim.x+threadIdx.x)*width + j];
}
else{
AShared[(threadIdx.x-blockDim.x+(mask_width/2))*sharedwidth + threadIdx.y]=0;
}
}
__syncthreads();
// Bottom apron: first mask_width/2 rows fetch the leading rows of the block
// below (zero-padded at the image's bottom edge).
if(threadIdx.x < (mask_width/2)){
if(blockIdx.x < gridDim.x-1){
AShared[(threadIdx.x+blockDim.x +mask_width/2)*sharedwidth +threadIdx.y]
= image[((blockIdx.x+1)*blockDim.x+threadIdx.x)*width + j];
}
else{
AShared[(threadIdx.x+blockDim.x + mask_width/2)*sharedwidth +threadIdx.y]=0;
}
}
__syncthreads();
// Convolve entirely out of shared memory.
for(k=0; k<mask_width; k++){
sum += AShared[(threadIdx.x+k) * sharedwidth + (threadIdx.y)]*mask[k];
}
// Write results
out_image[i*width+j] = sum;
__syncthreads();
}
/*
 * Builds a 1-D Gaussian kernel and its derivative for the given sigma.
 * Kernel radius a = ceil(2.5*sigma - 0.5); *w receives the width 2a+1 and
 * *g_kernel / *dg_kernel receive malloc'd arrays of that length (caller
 * frees).  The Gaussian is normalized to sum 1; the derivative is scaled by
 * its conventional -sum(i * dg[i]) normalizer.
 */
void create_gaussians(float **g_kernel, float **dg_kernel, float sigma, int *w){
    float a = ceil(2.5*sigma-0.5);
    // fix: the normalization sums were declared `int`, which truncated the
    // fractional Gaussian mass on every accumulation and broke normalization.
    float sum = 0;
    *w=2*a+1;
    *g_kernel=(float*)malloc(sizeof(float)*(*w));
    // Calculate gaussian
    for(int i=0; i<(*w); i++){
        (*g_kernel)[i] = exp((-1*(i-a)*(i-a))/
        (2*sigma*sigma));
        sum+=(*g_kernel)[i];
    }
    // Normalize
    for(int i=0; i<(*w); i++){
        (*g_kernel)[i]/=sum;
    }
    // Calculate Derivative
    sum = 0;
    *dg_kernel=(float*)malloc(sizeof(float)*(*w));
    for(int i=0; i<(*w); i++){
        (*dg_kernel)[i] = (-1*(i-a))*exp((-1*(i-a)*(i-a))/
        (2*sigma*sigma));
        sum-=i*(*dg_kernel)[i];
    }
    // Normalize
    for(int i=0; i<(*w); i++){
        (*dg_kernel)[i]/=sum;
    }
}
// Dumps a height x width row-major matrix to stdout: one row per line,
// values printed with three decimals and separated by single spaces.
void print_matrix(float *matrix, int height, int width){
    for (int row = 0; row < height; ++row) {
        const float *row_ptr = matrix + row * width;
        for (int col = 0; col < width; ++col)
            printf("%.3f ", row_ptr[col]);
        printf("\n");
    }
}
/*
 * Separable Gaussian gradient filter: smooths the input image with a 1-D
 * Gaussian in one axis and differentiates with the Gaussian derivative in
 * the other, producing horizontal/vertical gradient images written as PGM.
 * Usage: convolution <image.pgm> <sigma>.  Prints the device-side elapsed
 * time in microseconds.
 */
int main(int argc, char **argv){
    if(argc != 3)
        printf("convolution <file> <sigma>\n");
    else{
        int height, width, k_width;
        struct timeval start, end;
        // Host/device image buffers
        float *org_img;
        float *d_org_img;
        // Final gradient outputs (host and device)
        float *vertical_gradient, *horizontal_gradient;
        float *d_vertical_gradient, *d_horizontal_gradient;
        // Device scratch for the separable-convolution intermediates
        float *d_temp_horizontal, *d_temp_vertical;
        // Convolution masks (host and device)
        float *gaussian_kernel, *gaussian_deriv;
        float *d_gaussian_kernel, *d_gaussian_deriv;

        read_image_template(argv[1],
                            &org_img,
                            &width,
                            &height);
        create_gaussians(&gaussian_kernel, &gaussian_deriv, atof(argv[2]), &k_width);
        printf("Gaussian Kernel:\n");
        print_matrix(gaussian_kernel, 1, k_width);
        printf("Derivative Kernel:\n");
        print_matrix(gaussian_deriv,1,k_width);

        hipMalloc((void**)&d_org_img, sizeof(float)*width*height);
        hipMalloc((void**)&d_temp_horizontal, sizeof(float)*width*height);
        hipMalloc((void**)&d_temp_vertical, sizeof(float)*width*height);
        hipMalloc((void**)&d_horizontal_gradient, sizeof(float)*width*height);
        hipMalloc((void**)&d_vertical_gradient, sizeof(float)*width*height);
        hipMalloc((void**)&d_gaussian_kernel, sizeof(float)*k_width);
        hipMalloc((void**)&d_gaussian_deriv, sizeof(float)*k_width);

        gettimeofday(&start, NULL);
        // Offload all of the data to GPU device for convolution
        hipMemcpy(d_org_img, org_img, sizeof(float)*width*height, hipMemcpyHostToDevice);
        hipMemcpy(d_gaussian_kernel, gaussian_kernel, sizeof(float)*k_width, hipMemcpyHostToDevice);
        hipMemcpy(d_gaussian_deriv, gaussian_deriv, sizeof(float)*k_width, hipMemcpyHostToDevice);

        int block_dim = 16;
        // fix: ceil(height/block_dim) truncated via integer division before
        // ceil() ran, under-sizing the grid for images whose dimensions are
        // not multiples of block_dim; use integer ceiling division.
        dim3 dmGrid((height + block_dim - 1) / block_dim,
                    (width + block_dim - 1) / block_dim, 1);
        dim3 dmBlock(block_dim, block_dim, 1);
        // Dynamic shared memory: one 16-wide tile plus two aprons per block.
        size_t smem = sizeof(float) * 16 * (16 + 2 * (k_width / 2));

        // Vertical gradient: horizontal smoothing then vertical derivative
        hipLaunchKernelGGL(( convolve_hor), dim3(dmGrid),dim3(dmBlock), smem, 0,
            d_org_img, width, height, d_gaussian_kernel, k_width, d_temp_vertical);
        hipLaunchKernelGGL(( convolve_ver), dim3(dmGrid),dim3(dmBlock), smem, 0,
            d_temp_vertical, width, height, d_gaussian_deriv, k_width, d_vertical_gradient);
        // Horizontal gradient: vertical smoothing then horizontal derivative
        hipLaunchKernelGGL(( convolve_ver), dim3(dmGrid),dim3(dmBlock), smem, 0,
            d_org_img, width, height, d_gaussian_kernel, k_width, d_temp_horizontal);
        hipLaunchKernelGGL(( convolve_hor), dim3(dmGrid),dim3(dmBlock), smem, 0,
            d_temp_horizontal, width, height, d_gaussian_deriv, k_width, d_horizontal_gradient);

        horizontal_gradient = (float*)malloc(sizeof(float)*height*width);
        vertical_gradient = (float*)malloc(sizeof(float)*height*width);
        hipMemcpy(horizontal_gradient, d_horizontal_gradient, sizeof(float)*width*height, hipMemcpyDeviceToHost);
        hipMemcpy(vertical_gradient, d_vertical_gradient, sizeof(float)*width*height, hipMemcpyDeviceToHost);
        gettimeofday(&end, NULL);

        write_image_template("h_gradient_L1.pgm", horizontal_gradient, width, height);
        write_image_template("v_gradient_L1.pgm", vertical_gradient, width, height);
        printf("%ld\n", (end.tv_sec *1000000 + end.tv_usec)-(start.tv_sec * 1000000 + start.tv_usec));

        // fix: release device AND host allocations; the original leaked every
        // host buffer and also allocated two temp_gradient buffers it never
        // used (those allocations are removed entirely).
        hipFree(d_org_img);
        hipFree(d_temp_horizontal);
        hipFree(d_temp_vertical);
        hipFree(d_horizontal_gradient);
        hipFree(d_vertical_gradient);
        hipFree(d_gaussian_kernel);
        hipFree(d_gaussian_deriv);
        free(org_img);
        free(gaussian_kernel);
        free(gaussian_deriv);
        free(horizontal_gradient);
        free(vertical_gradient);
    }
}
| d3528028e5d31742552272cdc5ba4ac13c0065f5.cu | #include "math.h"
#include "stdlib.h"
#include "stdio.h"
#include "time.h"
#include "sys/time.h"
#include "cuda.h"
#include "image_template.h"
// Horizontal 1-D convolution of `image` (height x width, row-major) with
// `mask` (length mask_width), writing to out_image.  Each block stages its
// tile plus left/right aprons of floor(mask_width/2) pixels in dynamic
// shared memory; threadIdx.x/blockIdx.x walk rows, threadIdx.y/blockIdx.y
// walk columns.  Caller must provide
// blockDim.x * (blockDim.y + 2*floor(mask_width/2)) floats of shared memory.
// NOTE(review): sharedwidth is derived from blockDim.x but indexed by
// threadIdx.y, so square thread blocks (blockDim.x == blockDim.y) are
// assumed -- confirm before using non-square blocks.
// NOTE(review): there is no i<height / j<width guard; image dimensions must
// be exact multiples of the block size -- confirm with the launcher.
__global__
void convolve_hor(float *image, int width, int height, float *mask, int mask_width, float* out_image){
int i, j, k;
int sharedwidth = blockDim.x + 2 * floorf(mask_width/2);
float sum=0;
extern __shared__ float AShared[];
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
// Cooperative load: each thread puts its own pixel into the tile interior
// (shifted right by half the mask so the aprons fit on the left).
AShared[threadIdx.x * sharedwidth + (threadIdx.y + mask_width/2)] = image[i*width+j];
__syncthreads();
// Left apron: the last mask_width/2 threads of the row fetch the trailing
// columns of the previous block (zero-padded at the image's left edge).
if(threadIdx.y >= blockDim.y - (mask_width/2)){
if(blockIdx.y >= 1){
AShared[threadIdx.x*sharedwidth + threadIdx.y - blockDim.y + (mask_width/2)]
= image[i*width + (blockIdx.y-1) * blockDim.y + threadIdx.y];
}
else{
AShared[threadIdx.x * sharedwidth + threadIdx.y - blockDim.y + (mask_width/2)] = 0;
}
}
__syncthreads();
// Right apron: the first mask_width/2 threads fetch the leading columns of
// the next block (zero-padded at the image's right edge).
if(threadIdx.y < (mask_width/2)){
if(blockIdx.y < gridDim.y-1){
AShared[threadIdx.x*sharedwidth + mask_width/2 + blockDim.y + threadIdx.y]
= image[i * width + (blockIdx.y + 1)* blockDim.y + threadIdx.y];
}
else{
AShared[threadIdx.x * sharedwidth + mask_width/2 + blockDim.y + threadIdx.y] = 0;
}
}
__syncthreads();
// Convolve entirely out of shared memory.
for(k=0; k<mask_width; k++){
sum += AShared[threadIdx.x * sharedwidth + (threadIdx.y+k)]*mask[k];
}
// Write results
out_image[i*width+j] = sum;
__syncthreads();
}
// Vertical 1-D convolution of `image` (height x width, row-major) with
// `mask` (length mask_width), writing to out_image.  Mirror of convolve_hor:
// the aprons extend over rows instead of columns.  Caller must provide
// (blockDim.x + 2*floor(mask_width/2)) * blockDim.x floats of shared memory.
// NOTE(review): the shared row stride is blockDim.x but columns are indexed
// by threadIdx.y, so square thread blocks are assumed -- confirm before
// using non-square blocks.
// NOTE(review): no i<height / j<width guard; image dimensions must be exact
// multiples of the block size -- confirm with the launcher.
__global__
void convolve_ver(float *image, int width, int height, float *mask, int mask_width, float* out_image){
int i, j, k;
int sharedwidth = blockDim.x;
float sum=0;
extern __shared__ float AShared[];
i=blockIdx.x*blockDim.x + threadIdx.x;
j=blockIdx.y*blockDim.y + threadIdx.y;
// Cooperative load: own pixel into the tile interior, shifted down by half
// the mask so the top apron fits above it.
AShared[(threadIdx.x+mask_width/2) * sharedwidth + (threadIdx.y)] = image[i*width+j];
__syncthreads();
// Top apron: last mask_width/2 rows of threads fetch the trailing rows of
// the block above (zero-padded at the image's top edge).
if(threadIdx.x >= blockDim.x - (mask_width/2)){
if(blockIdx.x >= 1){
AShared[(threadIdx.x-blockDim.x+(mask_width/2))*sharedwidth + threadIdx.y]
= image[((blockIdx.x-1)*blockDim.x+threadIdx.x)*width + j];
}
else{
AShared[(threadIdx.x-blockDim.x+(mask_width/2))*sharedwidth + threadIdx.y]=0;
}
}
__syncthreads();
// Bottom apron: first mask_width/2 rows fetch the leading rows of the block
// below (zero-padded at the image's bottom edge).
if(threadIdx.x < (mask_width/2)){
if(blockIdx.x < gridDim.x-1){
AShared[(threadIdx.x+blockDim.x +mask_width/2)*sharedwidth +threadIdx.y]
= image[((blockIdx.x+1)*blockDim.x+threadIdx.x)*width + j];
}
else{
AShared[(threadIdx.x+blockDim.x + mask_width/2)*sharedwidth +threadIdx.y]=0;
}
}
__syncthreads();
// Convolve entirely out of shared memory.
for(k=0; k<mask_width; k++){
sum += AShared[(threadIdx.x+k) * sharedwidth + (threadIdx.y)]*mask[k];
}
// Write results
out_image[i*width+j] = sum;
__syncthreads();
}
/*
 * Builds a 1-D Gaussian kernel and its derivative for the given sigma.
 * Kernel radius a = ceil(2.5*sigma - 0.5); *w receives the width 2a+1 and
 * *g_kernel / *dg_kernel receive malloc'd arrays of that length (caller
 * frees).  The Gaussian is normalized to sum 1; the derivative is scaled by
 * its conventional -sum(i * dg[i]) normalizer.
 */
void create_gaussians(float **g_kernel, float **dg_kernel, float sigma, int *w){
    float a = ceil(2.5*sigma-0.5);
    // fix: the normalization sums were declared `int`, which truncated the
    // fractional Gaussian mass on every accumulation and broke normalization.
    float sum = 0;
    *w=2*a+1;
    *g_kernel=(float*)malloc(sizeof(float)*(*w));
    // Calculate gaussian
    for(int i=0; i<(*w); i++){
        (*g_kernel)[i] = exp((-1*(i-a)*(i-a))/
        (2*sigma*sigma));
        sum+=(*g_kernel)[i];
    }
    // Normalize
    for(int i=0; i<(*w); i++){
        (*g_kernel)[i]/=sum;
    }
    // Calculate Derivative
    sum = 0;
    *dg_kernel=(float*)malloc(sizeof(float)*(*w));
    for(int i=0; i<(*w); i++){
        (*dg_kernel)[i] = (-1*(i-a))*exp((-1*(i-a)*(i-a))/
        (2*sigma*sigma));
        sum-=i*(*dg_kernel)[i];
    }
    // Normalize
    for(int i=0; i<(*w); i++){
        (*dg_kernel)[i]/=sum;
    }
}
// Dumps a height x width row-major matrix to stdout: one row per line,
// values printed with three decimals and separated by single spaces.
void print_matrix(float *matrix, int height, int width){
    for (int row = 0; row < height; ++row) {
        const float *row_ptr = matrix + row * width;
        for (int col = 0; col < width; ++col)
            printf("%.3f ", row_ptr[col]);
        printf("\n");
    }
}
/*
 * Separable Gaussian gradient filter: smooths the input image with a 1-D
 * Gaussian in one axis and differentiates with the Gaussian derivative in
 * the other, producing horizontal/vertical gradient images written as PGM.
 * Usage: convolution <image.pgm> <sigma>.  Prints the device-side elapsed
 * time in microseconds.
 */
int main(int argc, char **argv){
    if(argc != 3)
        printf("convolution <file> <sigma>\n");
    else{
        int height, width, k_width;
        struct timeval start, end;
        // Host/device image buffers
        float *org_img;
        float *d_org_img;
        // Final gradient outputs (host and device)
        float *vertical_gradient, *horizontal_gradient;
        float *d_vertical_gradient, *d_horizontal_gradient;
        // Device scratch for the separable-convolution intermediates
        float *d_temp_horizontal, *d_temp_vertical;
        // Convolution masks (host and device)
        float *gaussian_kernel, *gaussian_deriv;
        float *d_gaussian_kernel, *d_gaussian_deriv;

        read_image_template(argv[1],
                            &org_img,
                            &width,
                            &height);
        create_gaussians(&gaussian_kernel, &gaussian_deriv, atof(argv[2]), &k_width);
        printf("Gaussian Kernel:\n");
        print_matrix(gaussian_kernel, 1, k_width);
        printf("Derivative Kernel:\n");
        print_matrix(gaussian_deriv,1,k_width);

        cudaMalloc((void**)&d_org_img, sizeof(float)*width*height);
        cudaMalloc((void**)&d_temp_horizontal, sizeof(float)*width*height);
        cudaMalloc((void**)&d_temp_vertical, sizeof(float)*width*height);
        cudaMalloc((void**)&d_horizontal_gradient, sizeof(float)*width*height);
        cudaMalloc((void**)&d_vertical_gradient, sizeof(float)*width*height);
        cudaMalloc((void**)&d_gaussian_kernel, sizeof(float)*k_width);
        cudaMalloc((void**)&d_gaussian_deriv, sizeof(float)*k_width);

        gettimeofday(&start, NULL);
        // Offload all of the data to GPU device for convolution
        cudaMemcpy(d_org_img, org_img, sizeof(float)*width*height, cudaMemcpyHostToDevice);
        cudaMemcpy(d_gaussian_kernel, gaussian_kernel, sizeof(float)*k_width, cudaMemcpyHostToDevice);
        cudaMemcpy(d_gaussian_deriv, gaussian_deriv, sizeof(float)*k_width, cudaMemcpyHostToDevice);

        int block_dim = 16;
        // fix: ceil(height/block_dim) truncated via integer division before
        // ceil() ran, under-sizing the grid for images whose dimensions are
        // not multiples of block_dim; use integer ceiling division.
        dim3 dmGrid((height + block_dim - 1) / block_dim,
                    (width + block_dim - 1) / block_dim, 1);
        dim3 dmBlock(block_dim, block_dim, 1);
        // Dynamic shared memory: one 16-wide tile plus two aprons per block.
        size_t smem = sizeof(float) * 16 * (16 + 2 * (k_width / 2));

        // Vertical gradient: horizontal smoothing then vertical derivative
        convolve_hor<<<dmGrid,dmBlock, smem>>>
            (d_org_img, width, height, d_gaussian_kernel, k_width, d_temp_vertical);
        convolve_ver<<<dmGrid,dmBlock, smem>>>
            (d_temp_vertical, width, height, d_gaussian_deriv, k_width, d_vertical_gradient);
        // Horizontal gradient: vertical smoothing then horizontal derivative
        convolve_ver<<<dmGrid,dmBlock, smem>>>
            (d_org_img, width, height, d_gaussian_kernel, k_width, d_temp_horizontal);
        convolve_hor<<<dmGrid,dmBlock, smem>>>
            (d_temp_horizontal, width, height, d_gaussian_deriv, k_width, d_horizontal_gradient);

        horizontal_gradient = (float*)malloc(sizeof(float)*height*width);
        vertical_gradient = (float*)malloc(sizeof(float)*height*width);
        cudaMemcpy(horizontal_gradient, d_horizontal_gradient, sizeof(float)*width*height, cudaMemcpyDeviceToHost);
        cudaMemcpy(vertical_gradient, d_vertical_gradient, sizeof(float)*width*height, cudaMemcpyDeviceToHost);
        gettimeofday(&end, NULL);

        write_image_template("h_gradient_L1.pgm", horizontal_gradient, width, height);
        write_image_template("v_gradient_L1.pgm", vertical_gradient, width, height);
        printf("%ld\n", (end.tv_sec *1000000 + end.tv_usec)-(start.tv_sec * 1000000 + start.tv_usec));

        // fix: release device AND host allocations; the original leaked every
        // host buffer and also allocated two temp_gradient buffers it never
        // used (those allocations are removed entirely).
        cudaFree(d_org_img);
        cudaFree(d_temp_horizontal);
        cudaFree(d_temp_vertical);
        cudaFree(d_horizontal_gradient);
        cudaFree(d_vertical_gradient);
        cudaFree(d_gaussian_kernel);
        cudaFree(d_gaussian_deriv);
        free(org_img);
        free(gaussian_kernel);
        free(gaussian_deriv);
        free(horizontal_gradient);
        free(vertical_gradient);
    }
}
|
988b86e60c986100185adc35a64778c8ea3f3dcb.hip | // !!! This is a file automatically generated by hipify!!!
#include <kernels/gpu/maxinum.h>
#include <core/tensor_builder.h>
#include <backend/name.h>
#include <utils/assert.h>
#include <global/operator_factory.h>
#include <global/fp16_operator_factory.h>
#include <core/device.h>
#include <numeric>
#include <core/memory.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <kernels/gpu/gpu_kernel.h>
namespace ts {
namespace gpu {
// In-place element-wise maximum of data[0..size) against a single
// device-resident scalar (*scalar is dereferenced on the device).
template<typename T>
static __global__ void reduce_operator_scalar_kernel(T* data, int size, const T *scalar) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size) return;
    const T cur = data[tid];
    const T s = *scalar;
    data[tid] = (cur > s) ? cur : s;
}
// In-place element-wise maximum of data[] against bias[]; both arrays must
// have at least `size` elements (same-shape broadcast case).
template<typename T>
static __global__ void reduce_operator_same_shape_kernel(T* data, const T*bias, int size) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size) return;
    const T a = data[tid];
    const T b = bias[tid];
    data[tid] = (a > b) ? a : b;
}
// In-place element-wise maximum of data[] against a per-channel bias:
// the channel of element i is (i % (step*slice)) / step, where `slice` is
// the channel count and `step` the stride below the channel dimension.
// `biaslen` is unused but kept for signature compatibility.
template<typename T>
static __global__ void reduce_operator_bias_kernel(T* data, int size, int step, int slice,
                    const T* bias, int biaslen ) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size) return;
    const int channel = tid % (step * slice) / step;
    const T a = data[tid];
    const T b = bias[channel];
    data[tid] = (a > b) ? a : b;
}
// Broadcasting element-wise maximum.  For each flat output index the kernel
// decomposes it against the output strides (outweight), wraps each
// coordinate by the corresponding lhs/rhs dimension (numpy-style broadcast
// with size-1 axes), rebuilds the two flat source indices via
// lhsweight/rhsweight, and writes the larger of the two source values.
// `shapelen` is the common rank of all three shape/weight tables; all
// tables live in device memory.
template<typename T>
static __global__ void reduce_operator_kernel(T* out, int size, const T* lhs, const T* rhs,
                    int *lhsshape, int *lhsweight,
                    int *rhsshape, int *rhsweight,
                    int *outweight, int shapelen) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= size)
return;
// Walk the output strides from the outermost dimension inward, peeling one
// coordinate per iteration.
int *ptmp = outweight + 1;
int ntmp = index;
int rhsindex = 0;
int lhsindex = 0;
int nbuff1,nbuff2;
nbuff1 = nbuff2 = 0;
for(int m = 0, i= shapelen - 1; i >= 0; --i, m++) {
if(i > 0) {
nbuff1 = ntmp / *ptmp;
ntmp %= *ptmp;
}else {
nbuff1 = ntmp;
}
// Broadcast: a size-1 source axis maps every output coordinate to 0.
nbuff2 = nbuff1 % lhsshape[m];
if(m < shapelen - 1) {
lhsindex += nbuff2 * lhsweight[m+1];
}else {
lhsindex += nbuff2;
}
nbuff2 = nbuff1 % rhsshape[m];
if(m < shapelen - 1) {
rhsindex += nbuff2 * rhsweight[m+1];
}else {
rhsindex += nbuff2;
}
++ptmp;
}
out[index] = lhs[lhsindex] > rhs[rhsindex] ? lhs[lhsindex] : rhs[rhsindex];
}
// Full-broadcast element-wise maximum on the GPU.  Uploads the shape and
// stride (HypeShape weight) tables of lhs/rhs/out into small device-side
// INT32 tensors so the kernel can decompose each output index and resolve
// the broadcast source indices on the fly.
template<typename T>
static inline void maxinum_gpu_compute_run(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
HypeShape lhs_hype(lhs.sizes());
HypeShape rhs_hype(rhs.sizes());
HypeShape out_hype(out.sizes());
auto plhs = lhs.data<T>();
auto prhs = rhs.data<T>();
auto pout = out.data<T>();
auto ncount = out.count();
int *lhsshape = nullptr;
int *rhsshape = nullptr;
int *lhsweight = nullptr;
int *rhsweight = nullptr;
int *outweight = nullptr;
// Allocate one small INT32 device tensor per shape/weight table; the
// backing Tensor objects keep the device memory alive until launch returns.
/////////////////////////////////////
Shape tmpshape;
tmpshape.resize(1);
tmpshape[0] = int32_t(lhs.sizes().size());
Tensor lhs_tensor(out.device(), INT32, tmpshape);
lhsshape = lhs_tensor.data<int32_t>();
tmpshape[0] = int32_t(rhs.sizes().size());
Tensor rhs_tensor(out.device(), INT32, tmpshape);
rhsshape = rhs_tensor.data<int32_t>();
tmpshape[0] = int32_t(lhs.sizes().size());
Tensor lhs_weight_tensor(out.device(), INT32, tmpshape);
lhsweight = lhs_weight_tensor.data<int32_t>();
tmpshape[0] = int32_t(rhs.sizes().size());
Tensor rhs_weight_tensor(out.device(), INT32, tmpshape);
rhsweight = rhs_weight_tensor.data<int32_t>();
tmpshape[0] = int32_t(out.sizes().size());
Tensor out_weight_tensor(out.device(), INT32, tmpshape);
outweight = out_weight_tensor.data<int32_t>();
// Copy the host-side shape/stride vectors into the device tables.
memcpy((void*)lhsshape, out.device(), lhs.sizes().size() * sizeof(int32_t),
(void*)lhs.sizes().data(), MemoryDevice(CPU), lhs.sizes().size() * sizeof(int32_t));
memcpy((void*)rhsshape, out.device(), rhs.sizes().size() * sizeof(int32_t),
(void*)rhs.sizes().data(), MemoryDevice(CPU), rhs.sizes().size() * sizeof(int32_t));
memcpy((void*)lhsweight, out.device(), lhs_hype.weight().size() * sizeof(int32_t),
(void*)lhs_hype.weight().data(), MemoryDevice(CPU), lhs_hype.weight().size() * sizeof(int32_t));
memcpy((void*)rhsweight, out.device(), rhs_hype.weight().size() * sizeof(int32_t),
(void*)rhs_hype.weight().data(), MemoryDevice(CPU), rhs_hype.weight().size() * sizeof(int32_t));
memcpy((void*)outweight, out.device(), out_hype.weight().size() * sizeof(int32_t),
(void*)out_hype.weight().data(), MemoryDevice(CPU), out_hype.weight().size() * sizeof(int32_t));
/////////////////////////////////////
// One thread per output element.
RUN_KERNEL(reduce_operator_kernel, CUDA_BLOCK(ncount, CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, ncount,
plhs, prhs, lhsshape, lhsweight, rhsshape, rhsweight, outweight, int(out.sizes().size()));
}
// Maximum of lhs against a single-element rhs (scalar broadcast).  lhs is
// first copied into out on the device, then the kernel maxes each element
// in-place against the scalar, which stays device-resident (the kernel
// dereferences the pointer).
template<typename T>
static inline void maxinum_gpu_compute_run_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
auto plhs = lhs.data<T>();
auto prhs = rhs.data<T>();
auto pout = out.data<T>();
// Seed the output with lhs, then reduce in-place.
memcpy((void*)pout, out.device(), out.count() * sizeof(T),
(void*)plhs, lhs.device(), out.count() * sizeof(T));
RUN_KERNEL(reduce_operator_scalar_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
pout, out.count(), prhs);
}
// Element-wise maximum of two tensors with identical shapes: lhs is copied
// into out on the device, then the kernel maxes each element in-place
// against the corresponding rhs element.
template<typename T>
static inline void maxinum_gpu_compute_run_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
auto plhs = lhs.data<T>();
auto prhs = rhs.data<T>();
auto pout = out.data<T>();
// Seed the output with lhs, then reduce in-place.
memcpy((void*)pout, out.device(), out.count() * sizeof(T),
(void*)plhs, lhs.device(), out.count() * sizeof(T));
RUN_KERNEL(reduce_operator_same_shape_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
pout, prhs, out.count());
}
// Element-wise maximum of lhs against a per-channel bias rhs broadcast along
// dimension `dim` of the output shape.  lhs is copied into out on the
// device, then the kernel maxes each element in-place against the bias of
// its channel.
template<typename T>
static inline void maxinum_gpu_compute_run_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) {
    auto plhs = lhs.data<T>();
    auto prhs = rhs.data<T>();
    auto pout = out.data<T>();

    auto &out_shape = out.sizes();
    // fix: removed the unused `number` local (an std::accumulate over the
    // leading dimensions whose result was never read).
    // count: elements spanned below the channel dimension (the kernel's step).
    auto count = std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, std::multiplies<int>());
    auto channels = out_shape[dim];

    // Seed the output with lhs, then reduce in-place.
    memcpy((void*)pout, out.device(), out.count() * sizeof(T),
           (void*)plhs, lhs.device(), out.count() * sizeof(T));

    RUN_KERNEL(reduce_operator_bias_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
               pout, out.count(), count, channels, prhs, rhs.count());
}
// Element-wise maximum with full broadcasting between lhs and rhs, writing
// into out; dispatches to the typed implementation based on out's dtype and
// rejects unsupported dtypes with an error log.
void Maxinum::reduce_with_broadcast(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
// NOTE(review): comment inherited from the CPU backend said "memory device
// are CPU"; in this gpu namespace the tensors are expected on the GPU
// device -- confirm against running_memory_device.
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run<TYPE>(lhs, rhs, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
void Maxinum::reduce_with_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run_scalar<TYPE>(lhs, rhs, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
void Maxinum::reduce_with_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run_bias<TYPE>(lhs, rhs, out, dim); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
void Maxinum::reduce_with_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run_same_shape<TYPE>(lhs, rhs, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(Maxinum, GPU, name::layer::maximum())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(Maxinum, GPU, name::layer::maximum())
#endif
| 988b86e60c986100185adc35a64778c8ea3f3dcb.cu | #include <kernels/gpu/maxinum.h>
#include <core/tensor_builder.h>
#include <backend/name.h>
#include <utils/assert.h>
#include <global/operator_factory.h>
#include <global/fp16_operator_factory.h>
#include <core/device.h>
#include <numeric>
#include <core/memory.h>
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <kernels/gpu/gpu_kernel.h>
namespace ts {
namespace gpu {
template<typename T>
static __global__ void reduce_operator_scalar_kernel(T* data, int size, const T *scalar) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
data[index] = data[index] > (*scalar) ? data[index] : (*scalar);
}
}
template<typename T>
static __global__ void reduce_operator_same_shape_kernel(T* data, const T*bias, int size) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
data[index] = data[index] > bias[index] ? data[index] : bias[index];
}
}
template<typename T>
static __global__ void reduce_operator_bias_kernel(T* data, int size, int step, int slice,
const T* bias, int biaslen ) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < size) {
int dim = index % ( step * slice ) / (step);
data[index] = data[index] > bias[dim] ? data[index] : bias[dim];
}
}
template<typename T>
static __global__ void reduce_operator_kernel(T* out, int size, const T* lhs, const T* rhs,
int *lhsshape, int *lhsweight,
int *rhsshape, int *rhsweight,
int *outweight, int shapelen) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= size)
return;
int *ptmp = outweight + 1;
int ntmp = index;
int rhsindex = 0;
int lhsindex = 0;
int nbuff1,nbuff2;
nbuff1 = nbuff2 = 0;
for(int m = 0, i= shapelen - 1; i >= 0; --i, m++) {
if(i > 0) {
nbuff1 = ntmp / *ptmp;
ntmp %= *ptmp;
}else {
nbuff1 = ntmp;
}
nbuff2 = nbuff1 % lhsshape[m];
if(m < shapelen - 1) {
lhsindex += nbuff2 * lhsweight[m+1];
}else {
lhsindex += nbuff2;
}
nbuff2 = nbuff1 % rhsshape[m];
if(m < shapelen - 1) {
rhsindex += nbuff2 * rhsweight[m+1];
}else {
rhsindex += nbuff2;
}
++ptmp;
}
out[index] = lhs[lhsindex] > rhs[rhsindex] ? lhs[lhsindex] : rhs[rhsindex];
}
template<typename T>
static inline void maxinum_gpu_compute_run(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
HypeShape lhs_hype(lhs.sizes());
HypeShape rhs_hype(rhs.sizes());
HypeShape out_hype(out.sizes());
auto plhs = lhs.data<T>();
auto prhs = rhs.data<T>();
auto pout = out.data<T>();
auto ncount = out.count();
int *lhsshape = nullptr;
int *rhsshape = nullptr;
int *lhsweight = nullptr;
int *rhsweight = nullptr;
int *outweight = nullptr;
/////////////////////////////////////
Shape tmpshape;
tmpshape.resize(1);
tmpshape[0] = int32_t(lhs.sizes().size());
Tensor lhs_tensor(out.device(), INT32, tmpshape);
lhsshape = lhs_tensor.data<int32_t>();
tmpshape[0] = int32_t(rhs.sizes().size());
Tensor rhs_tensor(out.device(), INT32, tmpshape);
rhsshape = rhs_tensor.data<int32_t>();
tmpshape[0] = int32_t(lhs.sizes().size());
Tensor lhs_weight_tensor(out.device(), INT32, tmpshape);
lhsweight = lhs_weight_tensor.data<int32_t>();
tmpshape[0] = int32_t(rhs.sizes().size());
Tensor rhs_weight_tensor(out.device(), INT32, tmpshape);
rhsweight = rhs_weight_tensor.data<int32_t>();
tmpshape[0] = int32_t(out.sizes().size());
Tensor out_weight_tensor(out.device(), INT32, tmpshape);
outweight = out_weight_tensor.data<int32_t>();
memcpy((void*)lhsshape, out.device(), lhs.sizes().size() * sizeof(int32_t),
(void*)lhs.sizes().data(), MemoryDevice(CPU), lhs.sizes().size() * sizeof(int32_t));
memcpy((void*)rhsshape, out.device(), rhs.sizes().size() * sizeof(int32_t),
(void*)rhs.sizes().data(), MemoryDevice(CPU), rhs.sizes().size() * sizeof(int32_t));
memcpy((void*)lhsweight, out.device(), lhs_hype.weight().size() * sizeof(int32_t),
(void*)lhs_hype.weight().data(), MemoryDevice(CPU), lhs_hype.weight().size() * sizeof(int32_t));
memcpy((void*)rhsweight, out.device(), rhs_hype.weight().size() * sizeof(int32_t),
(void*)rhs_hype.weight().data(), MemoryDevice(CPU), rhs_hype.weight().size() * sizeof(int32_t));
memcpy((void*)outweight, out.device(), out_hype.weight().size() * sizeof(int32_t),
(void*)out_hype.weight().data(), MemoryDevice(CPU), out_hype.weight().size() * sizeof(int32_t));
/////////////////////////////////////
RUN_KERNEL(reduce_operator_kernel, CUDA_BLOCK(ncount, CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, ncount,
plhs, prhs, lhsshape, lhsweight, rhsshape, rhsweight, outweight, int(out.sizes().size()));
}
template<typename T>
static inline void maxinum_gpu_compute_run_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
auto plhs = lhs.data<T>();
auto prhs = rhs.data<T>();
auto pout = out.data<T>();
memcpy((void*)pout, out.device(), out.count() * sizeof(T),
(void*)plhs, lhs.device(), out.count() * sizeof(T));
RUN_KERNEL(reduce_operator_scalar_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
pout, out.count(), prhs);
}
template<typename T>
static inline void maxinum_gpu_compute_run_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
auto plhs = lhs.data<T>();
auto prhs = rhs.data<T>();
auto pout = out.data<T>();
memcpy((void*)pout, out.device(), out.count() * sizeof(T),
(void*)plhs, lhs.device(), out.count() * sizeof(T));
RUN_KERNEL(reduce_operator_same_shape_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
pout, prhs, out.count());
}
template<typename T>
static inline void maxinum_gpu_compute_run_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) {
auto plhs = lhs.data<T>();
auto prhs = rhs.data<T>();
auto pout = out.data<T>();
auto &out_shape = out.sizes();
auto number = std::accumulate(out_shape.begin(), out_shape.begin() + dim, 1, std::multiplies<int>());
auto count = std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, std::multiplies<int>());
auto channels = out_shape[dim];
memcpy((void*)pout, out.device(), out.count() * sizeof(T),
(void*)plhs, lhs.device(), out.count() * sizeof(T));
RUN_KERNEL(reduce_operator_bias_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM,
pout, out.count(), count, channels, prhs, rhs.count());
}
void Maxinum::reduce_with_broadcast(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run<TYPE>(lhs, rhs, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
void Maxinum::reduce_with_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run_scalar<TYPE>(lhs, rhs, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
void Maxinum::reduce_with_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run_bias<TYPE>(lhs, rhs, out, dim); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
void Maxinum::reduce_with_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch(dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { maxinum_gpu_compute_run_same_shape<TYPE>(lhs, rhs, out); break; }
DECLARE_COMPUTE_RUN(INT8, int8_t);
DECLARE_COMPUTE_RUN(UINT8, uint8_t);
DECLARE_COMPUTE_RUN(INT16, int16_t);
DECLARE_COMPUTE_RUN(UINT16, uint16_t);
DECLARE_COMPUTE_RUN(INT32, int32_t);
DECLARE_COMPUTE_RUN(UINT32, uint32_t);
DECLARE_COMPUTE_RUN(INT64, int64_t);
DECLARE_COMPUTE_RUN(UINT64, uint64_t);
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(Maxinum, GPU, name::layer::maximum())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(Maxinum, GPU, name::layer::maximum())
#endif
|
7bd4420595522427567a91d7ea075beca51c4749.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
typedef struct {
real3 pos, force, torque, inducedDipole, inducedDipolePolar, sphericalDipole;
real q;
float thole, damp;
#ifdef INCLUDE_QUADRUPOLES
real sphericalQuadrupole[5];
#endif
} AtomData;
inline __device__ void loadAtomData(AtomData& data, int atom, const real4* __restrict__ posq, const real* __restrict__ sphericalDipole,
const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole, const real* __restrict__ inducedDipolePolar,
const float2* __restrict__ dampingAndThole) {
real4 atomPosq = posq[atom];
data.pos = make_real3(atomPosq.x, atomPosq.y, atomPosq.z);
data.q = atomPosq.w;
data.sphericalDipole.x = sphericalDipole[atom*3];
data.sphericalDipole.y = sphericalDipole[atom*3+1];
data.sphericalDipole.z = sphericalDipole[atom*3+2];
#ifdef INCLUDE_QUADRUPOLES
data.sphericalQuadrupole[0] = sphericalQuadrupole[atom*5];
data.sphericalQuadrupole[1] = sphericalQuadrupole[atom*5+1];
data.sphericalQuadrupole[2] = sphericalQuadrupole[atom*5+2];
data.sphericalQuadrupole[3] = sphericalQuadrupole[atom*5+3];
data.sphericalQuadrupole[4] = sphericalQuadrupole[atom*5+4];
#endif
data.inducedDipole.x = inducedDipole[atom*3];
data.inducedDipole.y = inducedDipole[atom*3+1];
data.inducedDipole.z = inducedDipole[atom*3+2];
data.inducedDipolePolar.x = inducedDipolePolar[atom*3];
data.inducedDipolePolar.y = inducedDipolePolar[atom*3+1];
data.inducedDipolePolar.z = inducedDipolePolar[atom*3+2];
float2 temp = dampingAndThole[atom];
data.damp = temp.x;
data.thole = temp.y;
}
__device__ real computeDScaleFactor(unsigned int polarizationGroup, int index) {
return (polarizationGroup & 1<<index ? 0 : 1);
}
__device__ float computeMScaleFactor(uint2 covalent, int index) {
int mask = 1<<index;
bool x = (covalent.x & mask);
bool y = (covalent.y & mask);
return (x ? (y ? 0.0f : 0.4f) : (y ? 0.8f : 1.0f));
}
__device__ float computePScaleFactor(uint2 covalent, unsigned int polarizationGroup, int index) {
int mask = 1<<index;
bool x = (covalent.x & mask);
bool y = (covalent.y & mask);
bool p = (polarizationGroup & mask);
return (x && y ? 0.0f : (x && p ? 0.5f : 1.0f));
}
__device__ void computeOneInteraction(AtomData& atom1, AtomData& atom2, bool hasExclusions, float dScale, float pScale, float mScale, float forceFactor,
mixed& energy, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
// Compute the displacement.
real3 delta;
delta.x = atom2.pos.x - atom1.pos.x;
delta.y = atom2.pos.y - atom1.pos.y;
delta.z = atom2.pos.z - atom1.pos.z;
APPLY_PERIODIC_TO_DELTA(delta)
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
if (r2 > CUTOFF_SQUARED)
return;
real rInv = RSQRT(r2);
real r = r2*rInv;
// Rotate the various dipoles and quadrupoles.
real qiRotationMatrix[3][3];
buildQIRotationMatrix(delta, rInv, qiRotationMatrix);
real3 qiUindI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipole.x + qiRotationMatrix[0][2]*atom1.inducedDipole.y + qiRotationMatrix[0][0]*atom1.inducedDipole.z,
qiRotationMatrix[1][1]*atom1.inducedDipole.x + qiRotationMatrix[1][2]*atom1.inducedDipole.y + qiRotationMatrix[1][0]*atom1.inducedDipole.z,
qiRotationMatrix[2][1]*atom1.inducedDipole.x + qiRotationMatrix[2][2]*atom1.inducedDipole.y + qiRotationMatrix[2][0]*atom1.inducedDipole.z);
real3 qiUindJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipole.x + qiRotationMatrix[0][2]*atom2.inducedDipole.y + qiRotationMatrix[0][0]*atom2.inducedDipole.z,
qiRotationMatrix[1][1]*atom2.inducedDipole.x + qiRotationMatrix[1][2]*atom2.inducedDipole.y + qiRotationMatrix[1][0]*atom2.inducedDipole.z,
qiRotationMatrix[2][1]*atom2.inducedDipole.x + qiRotationMatrix[2][2]*atom2.inducedDipole.y + qiRotationMatrix[2][0]*atom2.inducedDipole.z);
real3 qiUinpI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom1.inducedDipolePolar.z);
real3 qiUinpJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom2.inducedDipolePolar.z);
real3 rotatedDipole1 = rotateDipole(atom1.sphericalDipole, qiRotationMatrix);
real3 rotatedDipole2 = rotateDipole(atom2.sphericalDipole, qiRotationMatrix);
real rotatedQuadrupole1[] = {0, 0, 0, 0, 0};
real rotatedQuadrupole2[] = {0, 0, 0, 0, 0};
#ifdef INCLUDE_QUADRUPOLES
rotateQuadupoles(qiRotationMatrix, atom1.sphericalQuadrupole, atom2.sphericalQuadrupole, rotatedQuadrupole1, rotatedQuadrupole2);
#endif
// The field derivatives at I due to permanent and induced moments on J, and vice-versa.
// Also, their derivatives w.r.t. R, which are needed for force calculations
real Vij[9], Vji[9], VjiR[9], VijR[9];
// The field derivatives at I due to only permanent moments on J, and vice-versa.
real Vijp[3], Vijd[3], Vjip[3], Vjid[3];
real rInvVec[7], alphaRVec[8], bVec[5];
// The rInvVec array is defined such that the ith element is R^-i, with the
// dieleectric constant folded in, to avoid conversions later.
rInvVec[1] = rInv;
for (int i = 2; i < 7; ++i)
rInvVec[i] = rInvVec[i-1] * rInv;
// The alpharVec array is defined such that the ith element is (alpha R)^i,
// where kappa (alpha in OpenMM parlance) is the Ewald attenuation parameter.
real ralpha = EWALD_ALPHA*r;
real exp2a = EXP(-(ralpha*ralpha));
#ifdef USE_DOUBLE_PRECISION
const real erfAlphaR = erf(ralpha);
#else
// This approximation for erfc is from Abramowitz and Stegun (1964) p. 299. They cite the following as
// the original source: C. Hastings, Jr., Approximations for Digital Computers (1955). It has a maximum
// error of 1.5e-7.
const real t = RECIP(1.0f+0.3275911f*ralpha);
const real erfAlphaR = 1-(0.254829592f+(-0.284496736f+(1.421413741f+(-1.453152027f+1.061405429f*t)*t)*t)*t)*t*exp2a;
#endif
alphaRVec[1] = ralpha;
for (int i = 2; i < 8; ++i)
alphaRVec[i] = alphaRVec[i-1]*ralpha;
real X = 2*exp2a/SQRT_PI;
int doubleFactorial = 1, facCount = 1;
real tmp = alphaRVec[1];
bVec[1] = -erfAlphaR;
for (int i = 2; i < 5; ++i) {
bVec[i] = bVec[i-1] + tmp * X / (real)(doubleFactorial);
facCount = facCount + 2;
doubleFactorial = doubleFactorial * facCount;
tmp *= 2*alphaRVec[2];
}
real dmp = atom1.damp*atom2.damp;
real a = min(atom1.thole, atom2.thole);
real u = fabs(dmp) > 1.0e-5f ? r/dmp : 1e10f;
real au3 = a*u*u*u;
real expau3 = au3 < 50 ? EXP(-au3) : 0;
real a2u6 = au3*au3;
real a3u9 = a2u6*au3;
// Thole damping factors for energies
real thole_c = 1 - expau3;
real thole_d0 = 1 - expau3*(1 + 1.5f*au3);
real thole_d1 = 1 - expau3;
real thole_q0 = 1 - expau3*(1 + au3 + a2u6);
real thole_q1 = 1 - expau3*(1 + au3);
// Thole damping factors for derivatives
real dthole_c = 1 - expau3*(1 + 1.5f*au3);
real dthole_d0 = 1 - expau3*(1 + au3 + 1.5f*a2u6);
real dthole_d1 = 1 - expau3*(1 + au3);
real dthole_q0 = 1 - expau3*(1 + au3 + 0.25f*a2u6 + 0.75f*a3u9);
real dthole_q1 = 1 - expau3*(1 + au3 + 0.75f*a2u6);
// Now we compute the (attenuated) Coulomb operator and its derivatives, contracted with
// permanent moments and induced dipoles. Note that the coefficient of the permanent force
// terms is half of the expected value; this is because we compute the interaction of I with
// the sum of induced and permanent moments on J, as well as the interaction of J with I's
// permanent and induced moments; doing so double counts the permanent-permanent interaction.
real ePermCoef, dPermCoef, eUIndCoef, dUIndCoef, eUInpCoef, dUInpCoef;
// C-C terms (m=0)
ePermCoef = rInvVec[1]*(mScale + bVec[2] - alphaRVec[1]*X);
dPermCoef = -0.5f*(mScale + bVec[2])*rInvVec[2];
Vij[0] = ePermCoef*atom2.q;
Vji[0] = ePermCoef*atom1.q;
VijR[0] = dPermCoef*atom2.q;
VjiR[0] = dPermCoef*atom1.q;
// C-D and C-Uind terms (m=0)
ePermCoef = rInvVec[2]*(mScale + bVec[2]);
eUIndCoef = rInvVec[2]*(pScale*thole_c + bVec[2]);
eUInpCoef = rInvVec[2]*(dScale*thole_c + bVec[2]);
dPermCoef = -rInvVec[3]*(mScale + bVec[2] + alphaRVec[3]*X);
dUIndCoef = -2*rInvVec[3]*(pScale*dthole_c + bVec[2] + alphaRVec[3]*X);
dUInpCoef = -2*rInvVec[3]*(dScale*dthole_c + bVec[2] + alphaRVec[3]*X);
Vij[0] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] = -(ePermCoef*atom1.q);
VijR[0] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] = -(dPermCoef*atom1.q);
Vjip[0] = -(eUInpCoef*atom1.q);
Vjid[0] = -(eUIndCoef*atom1.q);
// D-C and Uind-C terms (m=0)
Vij[1] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] = eUInpCoef*atom2.q;
Vijd[0] = eUIndCoef*atom2.q;
// D-D and D-Uind terms (m=0)
const real twoThirds = (real) 2/3;
ePermCoef = -twoThirds*rInvVec[3]*(3*(mScale + bVec[3]) + alphaRVec[3]*X);
eUIndCoef = -twoThirds*rInvVec[3]*(3*(pScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
eUInpCoef = -twoThirds*rInvVec[3]*(3*(dScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
dPermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
dUIndCoef = rInvVec[4]*(6*(pScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
dUInpCoef = rInvVec[4]*(6*(dScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x;
Vji[1] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x;
VjiR[1] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedDipole2.x;
Vijd[0] += eUIndCoef*rotatedDipole2.x;
Vjip[0] += eUInpCoef*rotatedDipole1.x;
Vjid[0] += eUIndCoef*rotatedDipole1.x;
// D-D and D-Uind terms (m=1)
ePermCoef = rInvVec[3]*(mScale + bVec[3] - twoThirds*alphaRVec[3]*X);
eUIndCoef = rInvVec[3]*(pScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
eUInpCoef = rInvVec[3]*(dScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dPermCoef = -1.5f*rInvVec[4]*(mScale + bVec[3]);
dUIndCoef = -3*rInvVec[4]*(pScale*dthole_d1 + bVec[3]);
dUInpCoef = -3*rInvVec[4]*(dScale*dthole_d1 + bVec[3]);
Vij[2] = ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y;
Vji[2] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] = dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y;
VjiR[2] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] = ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z;
Vji[3] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] = dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z;
VjiR[3] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] = eUInpCoef*rotatedDipole2.y;
Vijd[1] = eUIndCoef*rotatedDipole2.y;
Vjip[1] = eUInpCoef*rotatedDipole1.y;
Vjid[1] = eUIndCoef*rotatedDipole1.y;
Vijp[2] = eUInpCoef*rotatedDipole2.z;
Vijd[2] = eUIndCoef*rotatedDipole2.z;
Vjip[2] = eUInpCoef*rotatedDipole1.z;
Vjid[2] = eUIndCoef*rotatedDipole1.z;
// C-Q terms (m=0)
ePermCoef = (mScale + bVec[3])*rInvVec[3];
dPermCoef = -((real) 1/3)*rInvVec[4]*(4.5f*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
Vij[0] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] = ePermCoef*atom1.q;
VijR[0] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] = dPermCoef*atom1.q;
// Q-C terms (m=0)
Vij[4] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedQuadrupole1[0];
// D-Q and Uind-Q terms (m=0)
const real fourThirds = (real) 4/3;
ePermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUIndCoef = rInvVec[4]*(3*(pScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUInpCoef = rInvVec[4]*(3*(dScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
dPermCoef = -fourThirds*rInvVec[5]*(4.5f*(mScale + bVec[3]) + (1 + alphaRVec[2])*alphaRVec[5]*X);
dUIndCoef = -fourThirds*rInvVec[5]*(9*(pScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
dUInpCoef = -fourThirds*rInvVec[5]*(9*(dScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedQuadrupole2[0];
Vijd[0] += eUIndCoef*rotatedQuadrupole2[0];
// Q-D and Q-Uind terms (m=0)
Vij[4] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] += -(ePermCoef*rotatedQuadrupole1[0]);
VijR[4] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] += -(dPermCoef*rotatedQuadrupole1[0]);
Vjip[0] += -(eUInpCoef*rotatedQuadrupole1[0]);
Vjid[0] += -(eUIndCoef*rotatedQuadrupole1[0]);
// D-Q and Uind-Q terms (m=1)
const real sqrtThree = SQRT((real) 3);
ePermCoef = -sqrtThree*rInvVec[4]*(mScale + bVec[3]);
eUIndCoef = -sqrtThree*rInvVec[4]*(pScale*thole_q1 + bVec[3]);
eUInpCoef = -sqrtThree*rInvVec[4]*(dScale*thole_q1 + bVec[3]);
const real fourSqrtOneThird = 4/sqrt((real) 3);
dPermCoef = fourSqrtOneThird*rInvVec[5]*(1.5f*(mScale + bVec[3]) + 0.5f*alphaRVec[5]*X);
dUIndCoef = fourSqrtOneThird*rInvVec[5]*(3*(pScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
dUInpCoef = fourSqrtOneThird*rInvVec[5]*(3*(dScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
Vij[2] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] += eUInpCoef*rotatedQuadrupole2[1];
Vijd[1] += eUIndCoef*rotatedQuadrupole2[1];
Vijp[2] += eUInpCoef*rotatedQuadrupole2[2];
Vijd[2] += eUIndCoef*rotatedQuadrupole2[2];
// D-Q and Uind-Q terms (m=1)
Vij[5] = -(ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y);
Vji[2] += -(ePermCoef*rotatedQuadrupole1[1]);
VijR[5] = -(dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y);
VjiR[2] += -(dPermCoef*rotatedQuadrupole1[1]);
Vij[6] = -(ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z);
Vji[3] += -(ePermCoef*rotatedQuadrupole1[2]);
VijR[6] = -(dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z);
VjiR[3] += -(dPermCoef*rotatedQuadrupole1[2]);
Vjip[1] += -(eUInpCoef*rotatedQuadrupole1[1]);
Vjid[1] += -(eUIndCoef*rotatedQuadrupole1[1]);
Vjip[2] += -(eUInpCoef*rotatedQuadrupole1[2]);
Vjid[2] += -(eUIndCoef*rotatedQuadrupole1[2]);
// Q-Q terms (m=0)
ePermCoef = rInvVec[5]*(6*(mScale + bVec[4]) + ((real) 4/45)*(-3 + 10*alphaRVec[2])*alphaRVec[5]*X);
dPermCoef = -rInvVec[6]*(135*(mScale + bVec[4]) + 4*(1 + 2*alphaRVec[2])*alphaRVec[7]*X)/9;
Vij[4] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedQuadrupole1[0];
// Q-Q terms (m=1)
const real fourOverFifteen = (real) 4/15;
ePermCoef = -fourOverFifteen*rInvVec[5]*(15*(mScale + bVec[4]) + alphaRVec[5]*X);
dPermCoef = rInvVec[6]*(10*(mScale + bVec[4]) + fourThirds*alphaRVec[7]*X);
Vij[5] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] += ePermCoef*rotatedQuadrupole1[1];
VijR[5] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] += dPermCoef*rotatedQuadrupole1[1];
Vij[6] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] += ePermCoef*rotatedQuadrupole1[2];
VijR[6] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] += dPermCoef*rotatedQuadrupole1[2];
// Q-Q terms (m=2)
ePermCoef = rInvVec[5]*(mScale + bVec[4] - fourOverFifteen*alphaRVec[5]*X);
dPermCoef = -2.5f*(mScale + bVec[4])*rInvVec[6];
Vij[7] = ePermCoef*rotatedQuadrupole2[3];
Vji[7] = ePermCoef*rotatedQuadrupole1[3];
VijR[7] = dPermCoef*rotatedQuadrupole2[3];
VjiR[7] = dPermCoef*rotatedQuadrupole1[3];
Vij[8] = ePermCoef*rotatedQuadrupole2[4];
Vji[8] = ePermCoef*rotatedQuadrupole1[4];
VijR[8] = dPermCoef*rotatedQuadrupole2[4];
VjiR[8] = dPermCoef*rotatedQuadrupole1[4];
// Evaluate the energies, forces and torques due to permanent+induced moments
// interacting with just the permanent moments.
energy += forceFactor*0.5f*(
atom1.q*Vij[0] + rotatedDipole1.x*Vij[1] + rotatedDipole1.y*Vij[2] + rotatedDipole1.z*Vij[3] + rotatedQuadrupole1[0]*Vij[4] + rotatedQuadrupole1[1]*Vij[5] + rotatedQuadrupole1[2]*Vij[6] + rotatedQuadrupole1[3]*Vij[7] + rotatedQuadrupole1[4]*Vij[8] +
atom2.q*Vji[0] + rotatedDipole2.x*Vji[1] + rotatedDipole2.y*Vji[2] + rotatedDipole2.z*Vji[3] + rotatedQuadrupole2[0]*Vji[4] + rotatedQuadrupole2[1]*Vji[5] + rotatedQuadrupole2[2]*Vji[6] + rotatedQuadrupole2[3]*Vji[7] + rotatedQuadrupole2[4]*Vji[8]);
real fIZ = atom1.q*VijR[0] + rotatedDipole1.x*VijR[1] + rotatedDipole1.y*VijR[2] + rotatedDipole1.z*VijR[3] + rotatedQuadrupole1[0]*VijR[4] + rotatedQuadrupole1[1]*VijR[5] + rotatedQuadrupole1[2]*VijR[6] + rotatedQuadrupole1[3]*VijR[7] + rotatedQuadrupole1[4]*VijR[8];
real fJZ = atom2.q*VjiR[0] + rotatedDipole2.x*VjiR[1] + rotatedDipole2.y*VjiR[2] + rotatedDipole2.z*VjiR[3] + rotatedQuadrupole2[0]*VjiR[4] + rotatedQuadrupole2[1]*VjiR[5] + rotatedQuadrupole2[2]*VjiR[6] + rotatedQuadrupole2[3]*VjiR[7] + rotatedQuadrupole2[4]*VjiR[8];
real EIX = rotatedDipole1.z*Vij[1] - rotatedDipole1.x*Vij[3] + sqrtThree*rotatedQuadrupole1[2]*Vij[4] + rotatedQuadrupole1[4]*Vij[5] - (sqrtThree*rotatedQuadrupole1[0]+rotatedQuadrupole1[3])*Vij[6] + rotatedQuadrupole1[2]*Vij[7] - rotatedQuadrupole1[1]*Vij[8];
real EIY = -rotatedDipole1.y*Vij[1] + rotatedDipole1.x*Vij[2] - sqrtThree*rotatedQuadrupole1[1]*Vij[4] + (sqrtThree*rotatedQuadrupole1[0]-rotatedQuadrupole1[3])*Vij[5] - rotatedQuadrupole1[4]*Vij[6] + rotatedQuadrupole1[1]*Vij[7] + rotatedQuadrupole1[2]*Vij[8];
real EIZ = -rotatedDipole1.z*Vij[2] + rotatedDipole1.y*Vij[3] - rotatedQuadrupole1[2]*Vij[5] + rotatedQuadrupole1[1]*Vij[6] - 2*rotatedQuadrupole1[4]*Vij[7] + 2*rotatedQuadrupole1[3]*Vij[8];
real EJX = rotatedDipole2.z*Vji[1] - rotatedDipole2.x*Vji[3] + sqrtThree*rotatedQuadrupole2[2]*Vji[4] + rotatedQuadrupole2[4]*Vji[5] - (sqrtThree*rotatedQuadrupole2[0]+rotatedQuadrupole2[3])*Vji[6] + rotatedQuadrupole2[2]*Vji[7] - rotatedQuadrupole2[1]*Vji[8];
real EJY = -rotatedDipole2.y*Vji[1] + rotatedDipole2.x*Vji[2] - sqrtThree*rotatedQuadrupole2[1]*Vji[4] + (sqrtThree*rotatedQuadrupole2[0]-rotatedQuadrupole2[3])*Vji[5] - rotatedQuadrupole2[4]*Vji[6] + rotatedQuadrupole2[1]*Vji[7] + rotatedQuadrupole2[2]*Vji[8];
real EJZ = -rotatedDipole2.z*Vji[2] + rotatedDipole2.y*Vji[3] - rotatedQuadrupole2[2]*Vji[5] + rotatedQuadrupole2[1]*Vji[6] - 2*rotatedQuadrupole2[4]*Vji[7] + 2*rotatedQuadrupole2[3]*Vji[8];
// Define the torque intermediates for the induced dipoles. These are simply the induced dipole torque
// intermediates dotted with the field due to permanent moments only, at each center. We inline the
// induced dipole torque intermediates here, for simplicity. N.B. There are no torques on the dipoles
// themselves, so we accumulate the torque intermediates into separate variables to allow them to be
// used only in the force calculation.
//
// The torque about the x axis (needed to obtain the y force on the induced dipoles, below)
// qiUindIx[0] = qiQUindI[2]; qiUindIx[1] = 0; qiUindIx[2] = -qiQUindI[0]
real iEIX = qiUinpI.z*Vijp[0] + qiUindI.z*Vijd[0] - qiUinpI.x*Vijp[2] - qiUindI.x*Vijd[2];
real iEJX = qiUinpJ.z*Vjip[0] + qiUindJ.z*Vjid[0] - qiUinpJ.x*Vjip[2] - qiUindJ.x*Vjid[2];
// The torque about the y axis (needed to obtain the x force on the induced dipoles, below)
// qiUindIy[0] = -qiQUindI[1]; qiUindIy[1] = qiQUindI[0]; qiUindIy[2] = 0
real iEIY = qiUinpI.x*Vijp[1] + qiUindI.x*Vijd[1] - qiUinpI.y*Vijp[0] - qiUindI.y*Vijd[0];
real iEJY = qiUinpJ.x*Vjip[1] + qiUindJ.x*Vjid[1] - qiUinpJ.y*Vjip[0] - qiUindJ.y*Vjid[0];
#ifdef MUTUAL_POLARIZATION
// Uind-Uind terms (m=0)
real eCoef = -fourThirds*rInvVec[3]*(3*(thole_d0 + bVec[3]) + alphaRVec[3]*X);
real dCoef = rInvVec[4]*(6*(dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
iEIX += eCoef*(qiUinpI.z*qiUindJ.x + qiUindI.z*qiUinpJ.x);
iEJX += eCoef*(qiUinpJ.z*qiUindI.x + qiUindJ.z*qiUinpI.x);
iEIY -= eCoef*(qiUinpI.y*qiUindJ.x + qiUindI.y*qiUinpJ.x);
iEJY -= eCoef*(qiUinpJ.y*qiUindI.x + qiUindJ.y*qiUinpI.x);
fIZ += dCoef*(qiUinpI.x*qiUindJ.x + qiUindI.x*qiUinpJ.x);
fIZ += dCoef*(qiUinpJ.x*qiUindI.x + qiUindJ.x*qiUinpI.x);
// Uind-Uind terms (m=1)
eCoef = 2*rInvVec[3]*(thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dCoef = -3*rInvVec[4]*(dthole_d1 + bVec[3]);
iEIX -= eCoef*(qiUinpI.x*qiUindJ.z + qiUindI.x*qiUinpJ.z);
iEJX -= eCoef*(qiUinpJ.x*qiUindI.z + qiUindJ.x*qiUinpI.z);
iEIY += eCoef*(qiUinpI.x*qiUindJ.y + qiUindI.x*qiUinpJ.y);
iEJY += eCoef*(qiUinpJ.x*qiUindI.y + qiUindJ.x*qiUinpI.y);
fIZ += dCoef*(qiUinpI.y*qiUindJ.y + qiUindI.y*qiUinpJ.y + qiUinpI.z*qiUindJ.z + qiUindI.z*qiUinpJ.z);
fIZ += dCoef*(qiUinpJ.y*qiUindI.y + qiUindJ.y*qiUinpI.y + qiUinpJ.z*qiUindI.z + qiUindJ.z*qiUinpI.z);
#endif
// The quasi-internal frame forces and torques. Note that the induced torque intermediates are
// used in the force expression, but not in the torques; the induced dipoles are isotropic.
real qiForce[3] = {rInv*(EIY+EJY+iEIY+iEJY), -rInv*(EIX+EJX+iEIX+iEJX), -(fJZ+fIZ)};
real qiTorqueI[3] = {-EIX, -EIY, -EIZ};
real qiTorqueJ[3] = {-EJX, -EJY, -EJZ};
real3 force = make_real3(qiRotationMatrix[1][1]*qiForce[0] + qiRotationMatrix[2][1]*qiForce[1] + qiRotationMatrix[0][1]*qiForce[2],
qiRotationMatrix[1][2]*qiForce[0] + qiRotationMatrix[2][2]*qiForce[1] + qiRotationMatrix[0][2]*qiForce[2],
qiRotationMatrix[1][0]*qiForce[0] + qiRotationMatrix[2][0]*qiForce[1] + qiRotationMatrix[0][0]*qiForce[2]);
atom1.force += force;
atom1.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueI[0] + qiRotationMatrix[2][1]*qiTorqueI[1] + qiRotationMatrix[0][1]*qiTorqueI[2],
qiRotationMatrix[1][2]*qiTorqueI[0] + qiRotationMatrix[2][2]*qiTorqueI[1] + qiRotationMatrix[0][2]*qiTorqueI[2],
qiRotationMatrix[1][0]*qiTorqueI[0] + qiRotationMatrix[2][0]*qiTorqueI[1] + qiRotationMatrix[0][0]*qiTorqueI[2]);
if (forceFactor == 1) {
atom2.force -= force;
atom2.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueJ[0] + qiRotationMatrix[2][1]*qiTorqueJ[1] + qiRotationMatrix[0][1]*qiTorqueJ[2],
qiRotationMatrix[1][2]*qiTorqueJ[0] + qiRotationMatrix[2][2]*qiTorqueJ[1] + qiRotationMatrix[0][2]*qiTorqueJ[2],
qiRotationMatrix[1][0]*qiTorqueJ[0] + qiRotationMatrix[2][0]*qiTorqueJ[1] + qiRotationMatrix[0][0]*qiTorqueJ[2]);
}
}
/**
 * Accumulate the Ewald self energy for one atom, and the self torque on its
 * permanent dipole arising from the induced dipoles.
 *
 * The energy combines the squared charge, the permanent dipole dotted with
 * (permanent + half the summed induced dipoles), and (when compiled in) the
 * squared spherical quadrupole, each with its own power of EWALD_ALPHA.
 */
__device__ void computeSelfEnergyAndTorque(AtomData& atom1, mixed& energy) {
    // Charge contribution: q^2.
    real chargeTerm = atom1.q*atom1.q;
    // Reorder the spherical dipole components (y, z, x) — presumably the
    // spherical-harmonic to Cartesian component mapping; confirm against the
    // conventions used elsewhere in this file.
    real3 permDipole = make_real3(atom1.sphericalDipole.y, atom1.sphericalDipole.z, atom1.sphericalDipole.x);
    // Dipole contribution: permanent dipole dotted with itself plus half of
    // the total induced dipole (induced + polar induced).
    real dipoleTerm = dot(permDipole, permDipole+(atom1.inducedDipole+atom1.inducedDipolePolar)*0.5f);
#ifdef INCLUDE_QUADRUPOLES
    // Quadrupole contribution: sum of the squared spherical components.
    real quadTerm = (atom1.sphericalQuadrupole[0]*atom1.sphericalQuadrupole[0] +
                     atom1.sphericalQuadrupole[1]*atom1.sphericalQuadrupole[1] +
                     atom1.sphericalQuadrupole[2]*atom1.sphericalQuadrupole[2] +
                     atom1.sphericalQuadrupole[3]*atom1.sphericalQuadrupole[3] +
                     atom1.sphericalQuadrupole[4]*atom1.sphericalQuadrupole[4]);
#else
    real quadTerm = 0;
#endif
    real prefac = -EWALD_ALPHA/SQRT_PI;
    real alpha2 = EWALD_ALPHA*EWALD_ALPHA;
    real alpha4 = alpha2*alpha2;
    energy += prefac*(chargeTerm + ((real)2/3)*alpha2*dipoleTerm + ((real) 4/15)*alpha4*quadTerm);
    // Self-torque for PME: the permanent dipole crossed with the total induced
    // dipole, scaled by (2/3) alpha^3 / sqrt(pi).
    real3 totalInduced = atom1.inducedDipole+atom1.inducedDipolePolar;
    atom1.torque += ((2/(real) 3)*(EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA)/SQRT_PI)*cross(permDipole, totalInduced);
}
/**
 * Compute electrostatic interactions.
 *
 * Work decomposition: each warp (TILE_SIZE lanes) processes a range of
 * TILE_SIZE x TILE_SIZE atom "tiles". Pass 1 handles the tiles listed in
 * exclusionTiles, applying per-pair covalent/polarization scale factors;
 * pass 2 handles the remaining tiles (taken from the neighbor list when
 * USE_CUTOFF is defined and the list fits in maxTiles, otherwise enumerated
 * over the upper triangle) with unit scale factors. Forces and torques are
 * accumulated into 64-bit fixed-point buffers (value * 2^32) via atomicAdd;
 * each thread accumulates its energy contribution into energyBuffer.
 *
 * NOTE(review): periodicBoxSize and the other box parameters are declared
 * only under USE_CUTOFF but are referenced unconditionally in the calls to
 * computeOneInteraction below — presumably this kernel is always compiled
 * with USE_CUTOFF; confirm.
 */
extern "C" __global__ void computeElectrostatics(
unsigned long long* __restrict__ forceBuffers, unsigned long long* __restrict__ torqueBuffers, mixed* __restrict__ energyBuffer,
const real4* __restrict__ posq, const uint2* __restrict__ covalentFlags, const unsigned int* __restrict__ polarizationGroupFlags,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const unsigned int* __restrict__ interactingAtoms,
#endif
const real* __restrict__ sphericalDipole, const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole,
const real* __restrict__ inducedDipolePolar, const float2* __restrict__ dampingAndThole) {
// tgx = lane index within the warp, tbx = index of the warp's first thread in the block.
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
// First loop: process tiles that contain exclusions.
// Each warp takes an equal contiguous slice of the exclusion-tile list.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
AtomData data;
unsigned int atom1 = x*TILE_SIZE + tgx;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
// Per-lane exclusion masks: one bit per partner atom in this tile.
uint2 covalent = covalentFlags[pos*TILE_SIZE+tgx];
unsigned int polarizationGroup = polarizationGroupFlags[pos*TILE_SIZE+tgx];
if (x == y) {
// This tile is on the diagonal.
localData[threadIdx.x].pos = data.pos;
localData[threadIdx.x].q = data.q;
localData[threadIdx.x].sphericalDipole = data.sphericalDipole;
#ifdef INCLUDE_QUADRUPOLES
localData[threadIdx.x].sphericalQuadrupole[0] = data.sphericalQuadrupole[0];
localData[threadIdx.x].sphericalQuadrupole[1] = data.sphericalQuadrupole[1];
localData[threadIdx.x].sphericalQuadrupole[2] = data.sphericalQuadrupole[2];
localData[threadIdx.x].sphericalQuadrupole[3] = data.sphericalQuadrupole[3];
localData[threadIdx.x].sphericalQuadrupole[4] = data.sphericalQuadrupole[4];
#endif
localData[threadIdx.x].inducedDipole = data.inducedDipole;
localData[threadIdx.x].inducedDipolePolar = data.inducedDipolePolar;
localData[threadIdx.x].thole = data.thole;
localData[threadIdx.x].damp = data.damp;
// Compute forces.
// On a diagonal tile every i-j pair is visited twice (once from each
// atom's lane), so the 0.5 force factor avoids double counting.
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+j;
if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, j);
float p = computePScaleFactor(covalent, polarizationGroup, j);
float m = computeMScaleFactor(covalent, j);
computeOneInteraction(data, localData[tbx+j], true, d, p, m, 0.5f, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
}
if (atom1 < NUM_ATOMS)
computeSelfEnergyAndTorque(data, energy);
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
// Accumulate into the 64-bit fixed-point buffers (value * 2^32); the
// x, y and z components are PADDED_NUM_ATOMS entries apart.
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[atom1], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
// Each lane starts at a different column offset (tj) so that at any step
// the TILE_SIZE lanes touch TILE_SIZE distinct localData entries.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+tj;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, tj);
float p = computePScaleFactor(covalent, polarizationGroup, tj);
float m = computeMScaleFactor(covalent, tj);
computeOneInteraction(data, localData[tbx+tj], true, d, p, m, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
// Write out both halves of the tile: row atoms (x block) from data,
// column atoms (y block) from localData.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
// atomIndices holds the global index of the atom each lane loaded into
// localData; skipTiles is a sliding, sorted window of exclusion-tile indices
// used to skip tiles already handled in the first loop.
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles)
x = tiles[pos];
else
#endif
{
// Decode (x, y) from the linear index over the upper-triangular tile set;
// the float sqrt can be off by one, hence the correction below.
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
// NOTE(review): skipTiles is volatile shared memory written and read with
// no __syncthreads(); this relies on the TILE_SIZE lanes of a warp
// executing in lockstep — confirm TILE_SIZE equals the warp size.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
AtomData data;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
// With a neighbor list the tile's column atoms may be an arbitrary set,
// so their indices come from interactingAtoms.
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
// Compute forces.
// No exclusions here, so all scale factors are 1.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
computeOneInteraction(data, localData[tbx+tj], false, 1, 1, 1, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
#ifdef USE_CUTOFF
offset = atomIndices[threadIdx.x];
#else
offset = y*TILE_SIZE + tgx;
#endif
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
pos++;
}
// Fold this thread's energy into the per-thread energy buffer.
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy*ENERGY_SCALE_FACTOR;
}
| 7bd4420595522427567a91d7ea075beca51c4749.cu | #define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
// Per-atom workspace used by the electrostatics kernels: position/charge and
// multipole data loaded from global memory, plus force and torque accumulators.
// Instances live both in registers (the "own" atom) and in shared memory (the
// partner atoms of a tile), so keep this struct as small as the options allow.
typedef struct {
real3 pos, force, torque, inducedDipole, inducedDipolePolar, sphericalDipole;
real q;
// Thole damping parameters (from the dampingAndThole float2 array).
float thole, damp;
#ifdef INCLUDE_QUADRUPOLES
// Quadrupole in the spherical representation: 5 independent components.
real sphericalQuadrupole[5];
#endif
} AtomData;
/**
 * Fill an AtomData record from the global arrays for a single atom.
 *
 * Array layouts visible here: posq packs {x, y, z, charge}; the spherical
 * dipole and both induced dipole arrays store 3 reals per atom; the spherical
 * quadrupole stores 5 reals per atom; dampingAndThole packs {damp, thole}.
 */
inline __device__ void loadAtomData(AtomData& data, int atom, const real4* __restrict__ posq, const real* __restrict__ sphericalDipole,
        const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole, const real* __restrict__ inducedDipolePolar,
        const float2* __restrict__ dampingAndThole) {
    // Position and charge arrive in a single coalesced real4 load.
    const real4 pq = posq[atom];
    data.pos = make_real3(pq.x, pq.y, pq.z);
    data.q = pq.w;
    const int base3 = 3*atom;
    data.sphericalDipole = make_real3(sphericalDipole[base3], sphericalDipole[base3+1], sphericalDipole[base3+2]);
#ifdef INCLUDE_QUADRUPOLES
    const int base5 = 5*atom;
    for (int i = 0; i < 5; i++)
        data.sphericalQuadrupole[i] = sphericalQuadrupole[base5+i];
#endif
    data.inducedDipole = make_real3(inducedDipole[base3], inducedDipole[base3+1], inducedDipole[base3+2]);
    data.inducedDipolePolar = make_real3(inducedDipolePolar[base3], inducedDipolePolar[base3+1], inducedDipolePolar[base3+2]);
    const float2 dampThole = dampingAndThole[atom];
    data.damp = dampThole.x;
    data.thole = dampThole.y;
}
/**
 * Scale factor for "direct" (d) polarization interactions: 0 when the partner
 * atom selected by bit `index` belongs to the same polarization group,
 * 1 otherwise.
 */
__device__ real computeDScaleFactor(unsigned int polarizationGroup, int index) {
    const bool sameGroup = (polarizationGroup>>index) & 1;
    return sameGroup ? 0 : 1;
}
/**
 * Scale factor for permanent multipole (m) interactions, decoded from the two
 * covalent-flag bitmasks at bit `index`:
 * {x,y} = {1,1} -> 0.0, {1,0} -> 0.4, {0,1} -> 0.8, {0,0} -> 1.0.
 */
__device__ float computeMScaleFactor(uint2 covalent, int index) {
    const int bit = 1<<index;
    const bool flag1 = (covalent.x & bit) != 0;
    const bool flag2 = (covalent.y & bit) != 0;
    if (flag1)
        return flag2 ? 0.0f : 0.4f;
    return flag2 ? 0.8f : 1.0f;
}
/**
 * Scale factor for polarization (p) interactions: 0 when both covalent bits
 * are set for the partner atom, 0.5 when the first covalent bit is set and the
 * atoms share a polarization group, 1 otherwise.
 */
__device__ float computePScaleFactor(uint2 covalent, unsigned int polarizationGroup, int index) {
    const int bit = 1<<index;
    const bool covalent1 = (covalent.x & bit) != 0;
    const bool covalent2 = (covalent.y & bit) != 0;
    const bool sameGroup = (polarizationGroup & bit) != 0;
    if (covalent1 && covalent2)
        return 0.0f;
    if (covalent1 && sameGroup)
        return 0.5f;
    return 1.0f;
}
__device__ void computeOneInteraction(AtomData& atom1, AtomData& atom2, bool hasExclusions, float dScale, float pScale, float mScale, float forceFactor,
mixed& energy, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
// Compute the displacement.
real3 delta;
delta.x = atom2.pos.x - atom1.pos.x;
delta.y = atom2.pos.y - atom1.pos.y;
delta.z = atom2.pos.z - atom1.pos.z;
APPLY_PERIODIC_TO_DELTA(delta)
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
if (r2 > CUTOFF_SQUARED)
return;
real rInv = RSQRT(r2);
real r = r2*rInv;
// Rotate the various dipoles and quadrupoles.
real qiRotationMatrix[3][3];
buildQIRotationMatrix(delta, rInv, qiRotationMatrix);
real3 qiUindI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipole.x + qiRotationMatrix[0][2]*atom1.inducedDipole.y + qiRotationMatrix[0][0]*atom1.inducedDipole.z,
qiRotationMatrix[1][1]*atom1.inducedDipole.x + qiRotationMatrix[1][2]*atom1.inducedDipole.y + qiRotationMatrix[1][0]*atom1.inducedDipole.z,
qiRotationMatrix[2][1]*atom1.inducedDipole.x + qiRotationMatrix[2][2]*atom1.inducedDipole.y + qiRotationMatrix[2][0]*atom1.inducedDipole.z);
real3 qiUindJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipole.x + qiRotationMatrix[0][2]*atom2.inducedDipole.y + qiRotationMatrix[0][0]*atom2.inducedDipole.z,
qiRotationMatrix[1][1]*atom2.inducedDipole.x + qiRotationMatrix[1][2]*atom2.inducedDipole.y + qiRotationMatrix[1][0]*atom2.inducedDipole.z,
qiRotationMatrix[2][1]*atom2.inducedDipole.x + qiRotationMatrix[2][2]*atom2.inducedDipole.y + qiRotationMatrix[2][0]*atom2.inducedDipole.z);
real3 qiUinpI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom1.inducedDipolePolar.z);
real3 qiUinpJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom2.inducedDipolePolar.z);
real3 rotatedDipole1 = rotateDipole(atom1.sphericalDipole, qiRotationMatrix);
real3 rotatedDipole2 = rotateDipole(atom2.sphericalDipole, qiRotationMatrix);
real rotatedQuadrupole1[] = {0, 0, 0, 0, 0};
real rotatedQuadrupole2[] = {0, 0, 0, 0, 0};
#ifdef INCLUDE_QUADRUPOLES
rotateQuadupoles(qiRotationMatrix, atom1.sphericalQuadrupole, atom2.sphericalQuadrupole, rotatedQuadrupole1, rotatedQuadrupole2);
#endif
// The field derivatives at I due to permanent and induced moments on J, and vice-versa.
// Also, their derivatives w.r.t. R, which are needed for force calculations
real Vij[9], Vji[9], VjiR[9], VijR[9];
// The field derivatives at I due to only permanent moments on J, and vice-versa.
real Vijp[3], Vijd[3], Vjip[3], Vjid[3];
real rInvVec[7], alphaRVec[8], bVec[5];
// The rInvVec array is defined such that the ith element is R^-i, with the
// dieleectric constant folded in, to avoid conversions later.
rInvVec[1] = rInv;
for (int i = 2; i < 7; ++i)
rInvVec[i] = rInvVec[i-1] * rInv;
// The alpharVec array is defined such that the ith element is (alpha R)^i,
// where kappa (alpha in OpenMM parlance) is the Ewald attenuation parameter.
real ralpha = EWALD_ALPHA*r;
real exp2a = EXP(-(ralpha*ralpha));
#ifdef USE_DOUBLE_PRECISION
const real erfAlphaR = erf(ralpha);
#else
// This approximation for erfc is from Abramowitz and Stegun (1964) p. 299. They cite the following as
// the original source: C. Hastings, Jr., Approximations for Digital Computers (1955). It has a maximum
// error of 1.5e-7.
const real t = RECIP(1.0f+0.3275911f*ralpha);
const real erfAlphaR = 1-(0.254829592f+(-0.284496736f+(1.421413741f+(-1.453152027f+1.061405429f*t)*t)*t)*t)*t*exp2a;
#endif
alphaRVec[1] = ralpha;
for (int i = 2; i < 8; ++i)
alphaRVec[i] = alphaRVec[i-1]*ralpha;
real X = 2*exp2a/SQRT_PI;
int doubleFactorial = 1, facCount = 1;
real tmp = alphaRVec[1];
bVec[1] = -erfAlphaR;
for (int i = 2; i < 5; ++i) {
bVec[i] = bVec[i-1] + tmp * X / (real)(doubleFactorial);
facCount = facCount + 2;
doubleFactorial = doubleFactorial * facCount;
tmp *= 2*alphaRVec[2];
}
real dmp = atom1.damp*atom2.damp;
real a = min(atom1.thole, atom2.thole);
real u = fabs(dmp) > 1.0e-5f ? r/dmp : 1e10f;
real au3 = a*u*u*u;
real expau3 = au3 < 50 ? EXP(-au3) : 0;
real a2u6 = au3*au3;
real a3u9 = a2u6*au3;
// Thole damping factors for energies
real thole_c = 1 - expau3;
real thole_d0 = 1 - expau3*(1 + 1.5f*au3);
real thole_d1 = 1 - expau3;
real thole_q0 = 1 - expau3*(1 + au3 + a2u6);
real thole_q1 = 1 - expau3*(1 + au3);
// Thole damping factors for derivatives
real dthole_c = 1 - expau3*(1 + 1.5f*au3);
real dthole_d0 = 1 - expau3*(1 + au3 + 1.5f*a2u6);
real dthole_d1 = 1 - expau3*(1 + au3);
real dthole_q0 = 1 - expau3*(1 + au3 + 0.25f*a2u6 + 0.75f*a3u9);
real dthole_q1 = 1 - expau3*(1 + au3 + 0.75f*a2u6);
// Now we compute the (attenuated) Coulomb operator and its derivatives, contracted with
// permanent moments and induced dipoles. Note that the coefficient of the permanent force
// terms is half of the expected value; this is because we compute the interaction of I with
// the sum of induced and permanent moments on J, as well as the interaction of J with I's
// permanent and induced moments; doing so double counts the permanent-permanent interaction.
real ePermCoef, dPermCoef, eUIndCoef, dUIndCoef, eUInpCoef, dUInpCoef;
// C-C terms (m=0)
ePermCoef = rInvVec[1]*(mScale + bVec[2] - alphaRVec[1]*X);
dPermCoef = -0.5f*(mScale + bVec[2])*rInvVec[2];
Vij[0] = ePermCoef*atom2.q;
Vji[0] = ePermCoef*atom1.q;
VijR[0] = dPermCoef*atom2.q;
VjiR[0] = dPermCoef*atom1.q;
// C-D and C-Uind terms (m=0)
ePermCoef = rInvVec[2]*(mScale + bVec[2]);
eUIndCoef = rInvVec[2]*(pScale*thole_c + bVec[2]);
eUInpCoef = rInvVec[2]*(dScale*thole_c + bVec[2]);
dPermCoef = -rInvVec[3]*(mScale + bVec[2] + alphaRVec[3]*X);
dUIndCoef = -2*rInvVec[3]*(pScale*dthole_c + bVec[2] + alphaRVec[3]*X);
dUInpCoef = -2*rInvVec[3]*(dScale*dthole_c + bVec[2] + alphaRVec[3]*X);
Vij[0] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] = -(ePermCoef*atom1.q);
VijR[0] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] = -(dPermCoef*atom1.q);
Vjip[0] = -(eUInpCoef*atom1.q);
Vjid[0] = -(eUIndCoef*atom1.q);
// D-C and Uind-C terms (m=0)
Vij[1] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] = eUInpCoef*atom2.q;
Vijd[0] = eUIndCoef*atom2.q;
// D-D and D-Uind terms (m=0)
const real twoThirds = (real) 2/3;
ePermCoef = -twoThirds*rInvVec[3]*(3*(mScale + bVec[3]) + alphaRVec[3]*X);
eUIndCoef = -twoThirds*rInvVec[3]*(3*(pScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
eUInpCoef = -twoThirds*rInvVec[3]*(3*(dScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
dPermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
dUIndCoef = rInvVec[4]*(6*(pScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
dUInpCoef = rInvVec[4]*(6*(dScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x;
Vji[1] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x;
VjiR[1] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedDipole2.x;
Vijd[0] += eUIndCoef*rotatedDipole2.x;
Vjip[0] += eUInpCoef*rotatedDipole1.x;
Vjid[0] += eUIndCoef*rotatedDipole1.x;
// D-D and D-Uind terms (m=1)
ePermCoef = rInvVec[3]*(mScale + bVec[3] - twoThirds*alphaRVec[3]*X);
eUIndCoef = rInvVec[3]*(pScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
eUInpCoef = rInvVec[3]*(dScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dPermCoef = -1.5f*rInvVec[4]*(mScale + bVec[3]);
dUIndCoef = -3*rInvVec[4]*(pScale*dthole_d1 + bVec[3]);
dUInpCoef = -3*rInvVec[4]*(dScale*dthole_d1 + bVec[3]);
Vij[2] = ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y;
Vji[2] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] = dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y;
VjiR[2] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] = ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z;
Vji[3] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] = dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z;
VjiR[3] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] = eUInpCoef*rotatedDipole2.y;
Vijd[1] = eUIndCoef*rotatedDipole2.y;
Vjip[1] = eUInpCoef*rotatedDipole1.y;
Vjid[1] = eUIndCoef*rotatedDipole1.y;
Vijp[2] = eUInpCoef*rotatedDipole2.z;
Vijd[2] = eUIndCoef*rotatedDipole2.z;
Vjip[2] = eUInpCoef*rotatedDipole1.z;
Vjid[2] = eUIndCoef*rotatedDipole1.z;
// C-Q terms (m=0)
ePermCoef = (mScale + bVec[3])*rInvVec[3];
dPermCoef = -((real) 1/3)*rInvVec[4]*(4.5f*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
Vij[0] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] = ePermCoef*atom1.q;
VijR[0] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] = dPermCoef*atom1.q;
// Q-C terms (m=0)
Vij[4] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedQuadrupole1[0];
// D-Q and Uind-Q terms (m=0)
const real fourThirds = (real) 4/3;
ePermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUIndCoef = rInvVec[4]*(3*(pScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUInpCoef = rInvVec[4]*(3*(dScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
dPermCoef = -fourThirds*rInvVec[5]*(4.5f*(mScale + bVec[3]) + (1 + alphaRVec[2])*alphaRVec[5]*X);
dUIndCoef = -fourThirds*rInvVec[5]*(9*(pScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
dUInpCoef = -fourThirds*rInvVec[5]*(9*(dScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedQuadrupole2[0];
Vijd[0] += eUIndCoef*rotatedQuadrupole2[0];
// Q-D and Q-Uind terms (m=0)
Vij[4] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] += -(ePermCoef*rotatedQuadrupole1[0]);
VijR[4] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] += -(dPermCoef*rotatedQuadrupole1[0]);
Vjip[0] += -(eUInpCoef*rotatedQuadrupole1[0]);
Vjid[0] += -(eUIndCoef*rotatedQuadrupole1[0]);
// D-Q and Uind-Q terms (m=1)
const real sqrtThree = SQRT((real) 3);
ePermCoef = -sqrtThree*rInvVec[4]*(mScale + bVec[3]);
eUIndCoef = -sqrtThree*rInvVec[4]*(pScale*thole_q1 + bVec[3]);
eUInpCoef = -sqrtThree*rInvVec[4]*(dScale*thole_q1 + bVec[3]);
const real fourSqrtOneThird = 4/sqrt((real) 3);
dPermCoef = fourSqrtOneThird*rInvVec[5]*(1.5f*(mScale + bVec[3]) + 0.5f*alphaRVec[5]*X);
dUIndCoef = fourSqrtOneThird*rInvVec[5]*(3*(pScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
dUInpCoef = fourSqrtOneThird*rInvVec[5]*(3*(dScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
Vij[2] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] += eUInpCoef*rotatedQuadrupole2[1];
Vijd[1] += eUIndCoef*rotatedQuadrupole2[1];
Vijp[2] += eUInpCoef*rotatedQuadrupole2[2];
Vijd[2] += eUIndCoef*rotatedQuadrupole2[2];
// D-Q and Uind-Q terms (m=1)
Vij[5] = -(ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y);
Vji[2] += -(ePermCoef*rotatedQuadrupole1[1]);
VijR[5] = -(dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y);
VjiR[2] += -(dPermCoef*rotatedQuadrupole1[1]);
Vij[6] = -(ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z);
Vji[3] += -(ePermCoef*rotatedQuadrupole1[2]);
VijR[6] = -(dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z);
VjiR[3] += -(dPermCoef*rotatedQuadrupole1[2]);
Vjip[1] += -(eUInpCoef*rotatedQuadrupole1[1]);
Vjid[1] += -(eUIndCoef*rotatedQuadrupole1[1]);
Vjip[2] += -(eUInpCoef*rotatedQuadrupole1[2]);
Vjid[2] += -(eUIndCoef*rotatedQuadrupole1[2]);
// Q-Q terms (m=0)
ePermCoef = rInvVec[5]*(6*(mScale + bVec[4]) + ((real) 4/45)*(-3 + 10*alphaRVec[2])*alphaRVec[5]*X);
dPermCoef = -rInvVec[6]*(135*(mScale + bVec[4]) + 4*(1 + 2*alphaRVec[2])*alphaRVec[7]*X)/9;
Vij[4] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedQuadrupole1[0];
// Q-Q terms (m=1)
const real fourOverFifteen = (real) 4/15;
ePermCoef = -fourOverFifteen*rInvVec[5]*(15*(mScale + bVec[4]) + alphaRVec[5]*X);
dPermCoef = rInvVec[6]*(10*(mScale + bVec[4]) + fourThirds*alphaRVec[7]*X);
Vij[5] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] += ePermCoef*rotatedQuadrupole1[1];
VijR[5] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] += dPermCoef*rotatedQuadrupole1[1];
Vij[6] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] += ePermCoef*rotatedQuadrupole1[2];
VijR[6] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] += dPermCoef*rotatedQuadrupole1[2];
// Q-Q terms (m=2)
ePermCoef = rInvVec[5]*(mScale + bVec[4] - fourOverFifteen*alphaRVec[5]*X);
dPermCoef = -2.5f*(mScale + bVec[4])*rInvVec[6];
Vij[7] = ePermCoef*rotatedQuadrupole2[3];
Vji[7] = ePermCoef*rotatedQuadrupole1[3];
VijR[7] = dPermCoef*rotatedQuadrupole2[3];
VjiR[7] = dPermCoef*rotatedQuadrupole1[3];
Vij[8] = ePermCoef*rotatedQuadrupole2[4];
Vji[8] = ePermCoef*rotatedQuadrupole1[4];
VijR[8] = dPermCoef*rotatedQuadrupole2[4];
VjiR[8] = dPermCoef*rotatedQuadrupole1[4];
// Evaluate the energies, forces and torques due to permanent+induced moments
// interacting with just the permanent moments.
energy += forceFactor*0.5f*(
atom1.q*Vij[0] + rotatedDipole1.x*Vij[1] + rotatedDipole1.y*Vij[2] + rotatedDipole1.z*Vij[3] + rotatedQuadrupole1[0]*Vij[4] + rotatedQuadrupole1[1]*Vij[5] + rotatedQuadrupole1[2]*Vij[6] + rotatedQuadrupole1[3]*Vij[7] + rotatedQuadrupole1[4]*Vij[8] +
atom2.q*Vji[0] + rotatedDipole2.x*Vji[1] + rotatedDipole2.y*Vji[2] + rotatedDipole2.z*Vji[3] + rotatedQuadrupole2[0]*Vji[4] + rotatedQuadrupole2[1]*Vji[5] + rotatedQuadrupole2[2]*Vji[6] + rotatedQuadrupole2[3]*Vji[7] + rotatedQuadrupole2[4]*Vji[8]);
real fIZ = atom1.q*VijR[0] + rotatedDipole1.x*VijR[1] + rotatedDipole1.y*VijR[2] + rotatedDipole1.z*VijR[3] + rotatedQuadrupole1[0]*VijR[4] + rotatedQuadrupole1[1]*VijR[5] + rotatedQuadrupole1[2]*VijR[6] + rotatedQuadrupole1[3]*VijR[7] + rotatedQuadrupole1[4]*VijR[8];
real fJZ = atom2.q*VjiR[0] + rotatedDipole2.x*VjiR[1] + rotatedDipole2.y*VjiR[2] + rotatedDipole2.z*VjiR[3] + rotatedQuadrupole2[0]*VjiR[4] + rotatedQuadrupole2[1]*VjiR[5] + rotatedQuadrupole2[2]*VjiR[6] + rotatedQuadrupole2[3]*VjiR[7] + rotatedQuadrupole2[4]*VjiR[8];
real EIX = rotatedDipole1.z*Vij[1] - rotatedDipole1.x*Vij[3] + sqrtThree*rotatedQuadrupole1[2]*Vij[4] + rotatedQuadrupole1[4]*Vij[5] - (sqrtThree*rotatedQuadrupole1[0]+rotatedQuadrupole1[3])*Vij[6] + rotatedQuadrupole1[2]*Vij[7] - rotatedQuadrupole1[1]*Vij[8];
real EIY = -rotatedDipole1.y*Vij[1] + rotatedDipole1.x*Vij[2] - sqrtThree*rotatedQuadrupole1[1]*Vij[4] + (sqrtThree*rotatedQuadrupole1[0]-rotatedQuadrupole1[3])*Vij[5] - rotatedQuadrupole1[4]*Vij[6] + rotatedQuadrupole1[1]*Vij[7] + rotatedQuadrupole1[2]*Vij[8];
real EIZ = -rotatedDipole1.z*Vij[2] + rotatedDipole1.y*Vij[3] - rotatedQuadrupole1[2]*Vij[5] + rotatedQuadrupole1[1]*Vij[6] - 2*rotatedQuadrupole1[4]*Vij[7] + 2*rotatedQuadrupole1[3]*Vij[8];
real EJX = rotatedDipole2.z*Vji[1] - rotatedDipole2.x*Vji[3] + sqrtThree*rotatedQuadrupole2[2]*Vji[4] + rotatedQuadrupole2[4]*Vji[5] - (sqrtThree*rotatedQuadrupole2[0]+rotatedQuadrupole2[3])*Vji[6] + rotatedQuadrupole2[2]*Vji[7] - rotatedQuadrupole2[1]*Vji[8];
real EJY = -rotatedDipole2.y*Vji[1] + rotatedDipole2.x*Vji[2] - sqrtThree*rotatedQuadrupole2[1]*Vji[4] + (sqrtThree*rotatedQuadrupole2[0]-rotatedQuadrupole2[3])*Vji[5] - rotatedQuadrupole2[4]*Vji[6] + rotatedQuadrupole2[1]*Vji[7] + rotatedQuadrupole2[2]*Vji[8];
real EJZ = -rotatedDipole2.z*Vji[2] + rotatedDipole2.y*Vji[3] - rotatedQuadrupole2[2]*Vji[5] + rotatedQuadrupole2[1]*Vji[6] - 2*rotatedQuadrupole2[4]*Vji[7] + 2*rotatedQuadrupole2[3]*Vji[8];
// Define the torque intermediates for the induced dipoles. These are simply the induced dipole torque
// intermediates dotted with the field due to permanent moments only, at each center. We inline the
// induced dipole torque intermediates here, for simplicity. N.B. There are no torques on the dipoles
// themselves, so we accumulate the torque intermediates into separate variables to allow them to be
// used only in the force calculation.
//
// The torque about the x axis (needed to obtain the y force on the induced dipoles, below)
// qiUindIx[0] = qiQUindI[2]; qiUindIx[1] = 0; qiUindIx[2] = -qiQUindI[0]
real iEIX = qiUinpI.z*Vijp[0] + qiUindI.z*Vijd[0] - qiUinpI.x*Vijp[2] - qiUindI.x*Vijd[2];
real iEJX = qiUinpJ.z*Vjip[0] + qiUindJ.z*Vjid[0] - qiUinpJ.x*Vjip[2] - qiUindJ.x*Vjid[2];
// The torque about the y axis (needed to obtain the x force on the induced dipoles, below)
// qiUindIy[0] = -qiQUindI[1]; qiUindIy[1] = qiQUindI[0]; qiUindIy[2] = 0
real iEIY = qiUinpI.x*Vijp[1] + qiUindI.x*Vijd[1] - qiUinpI.y*Vijp[0] - qiUindI.y*Vijd[0];
real iEJY = qiUinpJ.x*Vjip[1] + qiUindJ.x*Vjid[1] - qiUinpJ.y*Vjip[0] - qiUindJ.y*Vjid[0];
#ifdef MUTUAL_POLARIZATION
// Uind-Uind terms (m=0)
real eCoef = -fourThirds*rInvVec[3]*(3*(thole_d0 + bVec[3]) + alphaRVec[3]*X);
real dCoef = rInvVec[4]*(6*(dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
iEIX += eCoef*(qiUinpI.z*qiUindJ.x + qiUindI.z*qiUinpJ.x);
iEJX += eCoef*(qiUinpJ.z*qiUindI.x + qiUindJ.z*qiUinpI.x);
iEIY -= eCoef*(qiUinpI.y*qiUindJ.x + qiUindI.y*qiUinpJ.x);
iEJY -= eCoef*(qiUinpJ.y*qiUindI.x + qiUindJ.y*qiUinpI.x);
fIZ += dCoef*(qiUinpI.x*qiUindJ.x + qiUindI.x*qiUinpJ.x);
fIZ += dCoef*(qiUinpJ.x*qiUindI.x + qiUindJ.x*qiUinpI.x);
// Uind-Uind terms (m=1)
eCoef = 2*rInvVec[3]*(thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dCoef = -3*rInvVec[4]*(dthole_d1 + bVec[3]);
iEIX -= eCoef*(qiUinpI.x*qiUindJ.z + qiUindI.x*qiUinpJ.z);
iEJX -= eCoef*(qiUinpJ.x*qiUindI.z + qiUindJ.x*qiUinpI.z);
iEIY += eCoef*(qiUinpI.x*qiUindJ.y + qiUindI.x*qiUinpJ.y);
iEJY += eCoef*(qiUinpJ.x*qiUindI.y + qiUindJ.x*qiUinpI.y);
fIZ += dCoef*(qiUinpI.y*qiUindJ.y + qiUindI.y*qiUinpJ.y + qiUinpI.z*qiUindJ.z + qiUindI.z*qiUinpJ.z);
fIZ += dCoef*(qiUinpJ.y*qiUindI.y + qiUindJ.y*qiUinpI.y + qiUinpJ.z*qiUindI.z + qiUindJ.z*qiUinpI.z);
#endif
// The quasi-internal frame forces and torques. Note that the induced torque intermediates are
// used in the force expression, but not in the torques; the induced dipoles are isotropic.
real qiForce[3] = {rInv*(EIY+EJY+iEIY+iEJY), -rInv*(EIX+EJX+iEIX+iEJX), -(fJZ+fIZ)};
real qiTorqueI[3] = {-EIX, -EIY, -EIZ};
real qiTorqueJ[3] = {-EJX, -EJY, -EJZ};
real3 force = make_real3(qiRotationMatrix[1][1]*qiForce[0] + qiRotationMatrix[2][1]*qiForce[1] + qiRotationMatrix[0][1]*qiForce[2],
qiRotationMatrix[1][2]*qiForce[0] + qiRotationMatrix[2][2]*qiForce[1] + qiRotationMatrix[0][2]*qiForce[2],
qiRotationMatrix[1][0]*qiForce[0] + qiRotationMatrix[2][0]*qiForce[1] + qiRotationMatrix[0][0]*qiForce[2]);
atom1.force += force;
atom1.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueI[0] + qiRotationMatrix[2][1]*qiTorqueI[1] + qiRotationMatrix[0][1]*qiTorqueI[2],
qiRotationMatrix[1][2]*qiTorqueI[0] + qiRotationMatrix[2][2]*qiTorqueI[1] + qiRotationMatrix[0][2]*qiTorqueI[2],
qiRotationMatrix[1][0]*qiTorqueI[0] + qiRotationMatrix[2][0]*qiTorqueI[1] + qiRotationMatrix[0][0]*qiTorqueI[2]);
if (forceFactor == 1) {
atom2.force -= force;
atom2.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueJ[0] + qiRotationMatrix[2][1]*qiTorqueJ[1] + qiRotationMatrix[0][1]*qiTorqueJ[2],
qiRotationMatrix[1][2]*qiTorqueJ[0] + qiRotationMatrix[2][2]*qiTorqueJ[1] + qiRotationMatrix[0][2]*qiTorqueJ[2],
qiRotationMatrix[1][0]*qiTorqueJ[0] + qiRotationMatrix[2][0]*qiTorqueJ[1] + qiRotationMatrix[0][0]*qiTorqueJ[2]);
}
}
/**
 * Compute the Ewald (PME) self energy and self torque for one atom.
 *
 * Adds to `energy` the self-interaction correction built from the squared
 * charge (cii), a dipole term (dii) and, when INCLUDE_QUADRUPOLES is defined,
 * a quadrupole term (qii); adds to atom1.torque the self-torque of the
 * permanent dipole against the total induced dipole field.
 */
__device__ void computeSelfEnergyAndTorque(AtomData& atom1, mixed& energy) {
real cii = atom1.q*atom1.q;
// Components are taken in (y, z, x) order from the spherical-dipole storage;
// presumably this maps the spherical layout to Cartesian order -- confirm
// against the storage convention used by loadAtomData.
real3 dipole = make_real3(atom1.sphericalDipole.y, atom1.sphericalDipole.z, atom1.sphericalDipole.x);
// Permanent dipole dotted with (permanent + average of induced/induced-polar).
real dii = dot(dipole, dipole+(atom1.inducedDipole+atom1.inducedDipolePolar)*0.5f);
#ifdef INCLUDE_QUADRUPOLES
// Sum of squares of the five spherical quadrupole components.
real qii = (atom1.sphericalQuadrupole[0]*atom1.sphericalQuadrupole[0] +
atom1.sphericalQuadrupole[1]*atom1.sphericalQuadrupole[1] +
atom1.sphericalQuadrupole[2]*atom1.sphericalQuadrupole[2] +
atom1.sphericalQuadrupole[3]*atom1.sphericalQuadrupole[3] +
atom1.sphericalQuadrupole[4]*atom1.sphericalQuadrupole[4]);
#else
real qii = 0;
#endif
// Powers of the Ewald splitting parameter used by the self-energy expansion.
real prefac = -EWALD_ALPHA/SQRT_PI;
real a2 = EWALD_ALPHA*EWALD_ALPHA;
real a4 = a2*a2;
energy += prefac*(cii + ((real)2/3)*a2*dii + ((real) 4/15)*a4*qii);
// self-torque for PME
real3 ui = atom1.inducedDipole+atom1.inducedDipolePolar;
atom1.torque += ((2/(real) 3)*(EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA)/SQRT_PI)*cross(dipole, ui);
}
/**
 * Compute electrostatic interactions.
 *
 * One warp processes one 32x32 tile of the atom-pair matrix. The first loop
 * handles tiles containing exclusions (with per-pair d/p/m scale factors);
 * the second loop handles all remaining tiles, taken from the neighbor list
 * under USE_CUTOFF or enumerated exhaustively otherwise. Forces and torques
 * are accumulated into 64-bit fixed point (value * 2^32) via atomicAdd.
 */
extern "C" __global__ void computeElectrostatics(
unsigned long long* __restrict__ forceBuffers, unsigned long long* __restrict__ torqueBuffers, mixed* __restrict__ energyBuffer,
const real4* __restrict__ posq, const uint2* __restrict__ covalentFlags, const unsigned int* __restrict__ polarizationGroupFlags,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const unsigned int* __restrict__ interactingAtoms,
#endif
const real* __restrict__ sphericalDipole, const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole,
const real* __restrict__ inducedDipolePolar, const float2* __restrict__ dampingAndThole) {
// NOTE(review): periodicBox* are kernel parameters only under USE_CUTOFF yet
// are referenced unconditionally in the computeOneInteraction calls below;
// presumably USE_CUTOFF is always defined when this kernel is compiled, or
// they resolve to macros -- confirm.
// Warp bookkeeping: tgx is this thread's lane in its warp, tbx the index of
// the warp's first thread within the block.
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
AtomData data;
unsigned int atom1 = x*TILE_SIZE + tgx;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
uint2 covalent = covalentFlags[pos*TILE_SIZE+tgx];
unsigned int polarizationGroup = polarizationGroupFlags[pos*TILE_SIZE+tgx];
if (x == y) {
// This tile is on the diagonal.
// Mirror this thread's atom into shared memory so every lane can read it.
localData[threadIdx.x].pos = data.pos;
localData[threadIdx.x].q = data.q;
localData[threadIdx.x].sphericalDipole = data.sphericalDipole;
#ifdef INCLUDE_QUADRUPOLES
localData[threadIdx.x].sphericalQuadrupole[0] = data.sphericalQuadrupole[0];
localData[threadIdx.x].sphericalQuadrupole[1] = data.sphericalQuadrupole[1];
localData[threadIdx.x].sphericalQuadrupole[2] = data.sphericalQuadrupole[2];
localData[threadIdx.x].sphericalQuadrupole[3] = data.sphericalQuadrupole[3];
localData[threadIdx.x].sphericalQuadrupole[4] = data.sphericalQuadrupole[4];
#endif
localData[threadIdx.x].inducedDipole = data.inducedDipole;
localData[threadIdx.x].inducedDipolePolar = data.inducedDipolePolar;
localData[threadIdx.x].thole = data.thole;
localData[threadIdx.x].damp = data.damp;
// Compute forces.
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+j;
if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, j);
float p = computePScaleFactor(covalent, polarizationGroup, j);
float m = computeMScaleFactor(covalent, j);
// forceFactor 0.5f: on a diagonal tile each i-j pair is visited from both
// atoms, so each visit contributes half.
computeOneInteraction(data, localData[tbx+j], true, d, p, m, 0.5f, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
}
if (atom1 < NUM_ATOMS)
computeSelfEnergyAndTorque(data, energy);
// Convert to output units and accumulate in 2^32 fixed point.
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[atom1], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
// Rotate the starting column by lane index so lanes touch distinct atoms
// each iteration.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+tj;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, tj);
float p = computePScaleFactor(covalent, polarizationGroup, tj);
float m = computeMScaleFactor(covalent, tj);
computeOneInteraction(data, localData[tbx+tj], true, d, p, m, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
// Write results for both the row (x) and column (y) atoms of the tile.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles)
x = tiles[pos];
else
#endif
{
// Decode the linear tile index into (x, y) of the upper triangle.
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
AtomData data;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
// Compute forces. No exclusions here, so all scale factors are 1.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
computeOneInteraction(data, localData[tbx+tj], false, 1, 1, 1, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
#ifdef USE_CUTOFF
offset = atomIndices[threadIdx.x];
#else
offset = y*TILE_SIZE + tgx;
#endif
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
pos++;
}
// Per-thread energy is accumulated once at the end.
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy*ENERGY_SCALE_FACTOR;
}
|
2926737c8824996dd1387e335d3e0417270f3483.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "returnQ1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int dim = 1;
const int n = 1;
const float *p1 = NULL;
hipMalloc(&p1, XSIZE*YSIZE);
const float *p0 = NULL;
hipMalloc(&p0, XSIZE*YSIZE);
const float *s1 = NULL;
hipMalloc(&s1, XSIZE*YSIZE);
const float *s0 = NULL;
hipMalloc(&s0, XSIZE*YSIZE);
const float *zr = NULL;
hipMalloc(&zr, XSIZE*YSIZE);
float *q = NULL;
hipMalloc(&q, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
returnQ1), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,n,p1,p0,s1,s0,zr,q);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
returnQ1), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,n,p1,p0,s1,s0,zr,q);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
returnQ1), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,n,p1,p0,s1,s0,zr,q);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2926737c8824996dd1387e335d3e0417270f3483.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "returnQ1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: times 1000 launches of returnQ1 for every (matrix size,
// block shape) combination and prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
int main(int argc, char **argv) {
cudaSetDevice(0);
// Guard against a missing argument before dereferencing argv[1].
if (argc < 2) {
fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
return 1;
}
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int dim = 1;
const int n = 1;
// Size allocations in bytes; the original passed an element count to
// cudaMalloc, allocating only a quarter of the intended float buffer.
size_t bytes = (size_t)XSIZE*YSIZE*sizeof(float);
// Non-const device pointers; the kernel takes them as const float*.
float *p1 = NULL;
cudaMalloc(&p1, bytes);
float *p0 = NULL;
cudaMalloc(&p0, bytes);
float *s1 = NULL;
cudaMalloc(&s1, bytes);
float *s0 = NULL;
cudaMalloc(&s0, bytes);
float *zr = NULL;
cudaMalloc(&zr, bytes);
float *q = NULL;
cudaMalloc(&q, bytes);
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // no-op free: forces lazy context creation before timing
returnQ1<<<gridBlock,threadBlock>>>(dim,n,p1,p0,s1,s0,zr,q);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
returnQ1<<<gridBlock,threadBlock>>>(dim,n,p1,p0,s1,s0,zr,q);
}
// Drain the warm-up launches so they are not billed to the timed region.
cudaDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
returnQ1<<<gridBlock,threadBlock>>>(dim,n,p1,p0,s1,s0,zr,q);
}
// Kernel launches are asynchronous: without this sync the interval below
// measures only launch overhead, not kernel execution.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Release the per-iteration buffers (the original leaked all six on
// every pass through the inner loop).
cudaFree(p1);
cudaFree(p0);
cudaFree(s1);
cudaFree(s0);
cudaFree(zr);
cudaFree(q);
}
}}
d341ee709f80f8a1976893bbd8653676084e57fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "object/light/spot_light.hpp"
#include <cfloat>
using namespace px;
BaseSpotLight::BaseSpotLight(Point const &pos,
Direction const &direction,
PREC const &half_angle1,
PREC const &half_angle2,
PREC const &falloff)
: _pos(pos), _dir(direction), _falloff(falloff)
{
setAngles(half_angle1, half_angle2);
}
void BaseSpotLight::setPos(Point const &pos)
{
_pos = pos;
}
void BaseSpotLight::setDir(Direction const &direction)
{
_dir = direction;
}
// Normalize and store the cone half-angles and the derived cosine terms.
// Each angle is wrapped into [0, 2*pi), clamped to at most pi, and the pair
// is ordered so that _inner_ha <= _outer_ha. The cached cosines and
// _multiplier = 1/(cos(outer) - cos(inner)) drive the falloff blend used by
// attenuate().
void BaseSpotLight::setAngles(PREC const &half_angle1, PREC const &half_angle2)
{
_inner_ha = half_angle1 < 0 ?
::fmod(half_angle1, PREC(PI2)) + PREC(PI2) : ::fmod(half_angle1,PREC(PI2));
_outer_ha = half_angle2 < 0 ?
::fmod(half_angle2, PREC(PI2)) + PREC(PI2) : ::fmod(half_angle2,PREC(PI2));
// BUGFIX: clamp the angles, not the cosine caches. The original tested
// _inner_ha_cosine/_outer_ha_cosine here, but those still hold stale values
// at this point and, being cosines in [-1, 1], can never exceed pi anyway.
if (_inner_ha > PI)
_inner_ha = PI;
if (_outer_ha > PI)
_outer_ha = PI;
if (_outer_ha < _inner_ha)
{
auto tmp = _inner_ha;
_inner_ha = _outer_ha;
_outer_ha = tmp;
}
_inner_ha_cosine = std::cos(_inner_ha);
_outer_ha_cosine = std::cos(_outer_ha);
_multiplier = 1.0 / (_outer_ha_cosine - _inner_ha_cosine);
}
void BaseSpotLight::setFalloff(PREC const &falloff)
{
_falloff = falloff;
}
// Attenuation factor of the spotlight at world point (x, y, z).
// Returns:
//   FLT_MAX                    when the point coincides with the light,
//   1/r^2                      inside the inner cone,
//   pow(blend, falloff)/r^2    between the inner and outer cones, where
//                              blend interpolates on the cosine of the angle,
//   0                          outside the outer cone.
// `obj` is a type-erased BaseSpotLight*, allowing use through a function
// pointer from both host and device code.
PX_CUDA_CALLABLE
PREC BaseSpotLight::attenuate(void *const &obj,
PREC const &x, PREC const &y, PREC const &z)
{
auto o = reinterpret_cast<BaseSpotLight*>(obj);
// Vector from the light position to the query point.
PREC dx = x-o->_pos.x;
PREC dy = y-o->_pos.y;
PREC dz = z-o->_pos.z;
PREC nrm2 = dx*dx + dy*dy + dz*dz;
if (nrm2 < EPSILON)
return FLT_MAX;
// Normalize so the dot product below is the cosine of the off-axis angle.
PREC nrm = std::sqrt(nrm2);
dx /= nrm;
dy /= nrm;
dz /= nrm;
PREC cosine = o->_dir.x * dx + o->_dir.y * dy + o->_dir.z * dz;
if (cosine >= o->_inner_ha_cosine)
return 1.0/nrm2;
if (cosine > o->_outer_ha_cosine)
return ::pow(((o->_outer_ha_cosine-cosine)*o->_multiplier), o->_falloff)/nrm2;
return 0;
}
// Direction from the query point (x, y, z) toward the light position; the
// Euclidean distance is written through `dist`. Note the returned vector is
// not normalized here.
Direction BaseSpotLight::dirFromHost(BaseSpotLight *const &obj,
PREC const &x, PREC const &y,
PREC const &z, PREC &dist)
{
auto const off_x = obj->_pos.x - x;
auto const off_y = obj->_pos.y - y;
auto const off_z = obj->_pos.z - z;
auto const len2 = off_x*off_x + off_y*off_y + off_z*off_z;
dist = std::sqrt(len2);
return {off_x, off_y, off_z};
}
PX_CUDA_CALLABLE
Direction BaseSpotLight::dirFromDevice(void *const &obj, PREC const &x,
PREC const &y, PREC const &z,
PREC &dist,
hiprandState_t * const &)
{
auto o = reinterpret_cast<BaseSpotLight*>(obj);
auto dx = o->_pos.x - x;
auto dy = o->_pos.y - y;
auto dz = o->_pos.z - z;
dist = std::sqrt(dx*dx + dy*dy + dz*dz);
return {dx, dy, dz};
}
// NOTE(review): TYPE is initialized to LightType::PointLight even though this
// class is the spot light. Presumably intentional (sampled like a point
// source) or the enum lacks a SpotLight value -- confirm against the
// LightType declaration.
const LightType SpotLight::TYPE = LightType::PointLight;
std::shared_ptr<BaseLight> SpotLight::create(Light const &light,
Point const &pos,
Direction const &direction,
PREC const &half_angle1,
PREC const &half_angle2,
PREC const &falloff)
{
return std::shared_ptr<BaseLight>(new SpotLight(light, pos, direction,
half_angle1, half_angle2,
falloff));
}
SpotLight::SpotLight(Light const &light,
Point const &pos,
Direction const &direction,
PREC const &half_angle1,
PREC const &half_angle2,
PREC const &falloff)
: BaseLight(TYPE, light),
_obj(new BaseSpotLight(pos, direction,
half_angle1, half_angle2,
falloff)),
_gpu_obj(nullptr),
_need_upload(true)
{}
SpotLight::~SpotLight()
{
delete _obj;
#ifdef USE_ROCM
clearGpuData();
#endif
}
PREC SpotLight::attenuate(PREC const &x, PREC const &y, PREC const &z) const
{
return BaseSpotLight::attenuate(_obj, x, y, z);
}
Direction SpotLight::dirFrom(PREC const &x, PREC const &y, PREC const &z, PREC &dist) const
{
return BaseSpotLight::dirFromHost(_obj, x, y, z, dist);
}
void SpotLight::setPos(Point const &pos)
{
_obj->setPos(pos);
#ifdef USE_ROCM
_need_upload = true;
#endif
}
void SpotLight::setDir(Direction const &direction)
{
_obj->setDir(direction);
#ifdef USE_ROCM
_need_upload = true;
#endif
}
void SpotLight::setAngles(PREC const &half_angle1, PREC const &half_angle2)
{
_obj->setAngles(half_angle1, half_angle2);
#ifdef USE_ROCM
_need_upload = true;
#endif
}
void SpotLight::setFalloff(PREC const &falloff)
{
_obj->setFalloff(falloff);
#ifdef USE_ROCM
_need_upload = true;
#endif
}
#ifdef USE_ROCM
__device__ fnAttenuate_t __fn_attenuate_spot_light = BaseSpotLight::attenuate;
__device__ fnDirFrom_t __fn_dir_from_spot_light = BaseSpotLight::dirFromDevice;
#endif
void SpotLight::up2Gpu()
{
#ifdef USE_ROCM
static fnAttenuate_t fn_attenuate_h = nullptr;
static fnDirFrom_t fn_dir_from_h;
if (_need_upload || BaseLight::need_upload)
{
if (dev_ptr == nullptr)
{
PX_CUDA_CHECK(hipMalloc(&_gpu_obj, sizeof(BaseSpotLight)));
PX_CUDA_CHECK(hipMalloc(&dev_ptr, sizeof(LightObj)));
}
if (fn_attenuate_h == nullptr)
{
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_attenuate_h, __fn_attenuate_spot_light, sizeof(fnAttenuate_t)));
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_dir_from_h, __fn_dir_from_spot_light, sizeof(fnDirFrom_t)));
}
PX_CUDA_CHECK(hipMemcpy(_gpu_obj, _obj, sizeof(BaseSpotLight),
hipMemcpyHostToDevice));
LightObj tmp(_gpu_obj, type, _light, fn_attenuate_h, fn_dir_from_h);
PX_CUDA_CHECK(hipMemcpy(dev_ptr, &tmp, sizeof(LightObj),
hipMemcpyHostToDevice));
_need_upload = false;
BaseLight::need_upload = false;
}
#endif
}
void SpotLight::clearGpuData()
{
#ifdef USE_ROCM
if (_gpu_obj != nullptr)
{
PX_CUDA_CHECK(hipFree(_gpu_obj));
_gpu_obj = nullptr;
}
BaseLight::clearGpuData();
#endif
}
| d341ee709f80f8a1976893bbd8653676084e57fc.cu | #include "object/light/spot_light.hpp"
#include <cfloat>
using namespace px;
BaseSpotLight::BaseSpotLight(Point const &pos,
Direction const &direction,
PREC const &half_angle1,
PREC const &half_angle2,
PREC const &falloff)
: _pos(pos), _dir(direction), _falloff(falloff)
{
setAngles(half_angle1, half_angle2);
}
void BaseSpotLight::setPos(Point const &pos)
{
_pos = pos;
}
void BaseSpotLight::setDir(Direction const &direction)
{
_dir = direction;
}
// Normalize and store the cone half-angles and the derived cosine terms.
// Each angle is wrapped into [0, 2*pi), clamped to at most pi, and the pair
// is ordered so that _inner_ha <= _outer_ha. The cached cosines and
// _multiplier = 1/(cos(outer) - cos(inner)) drive the falloff blend used by
// attenuate().
void BaseSpotLight::setAngles(PREC const &half_angle1, PREC const &half_angle2)
{
_inner_ha = half_angle1 < 0 ?
std::fmod(half_angle1, PREC(PI2)) + PREC(PI2) : std::fmod(half_angle1,PREC(PI2));
_outer_ha = half_angle2 < 0 ?
std::fmod(half_angle2, PREC(PI2)) + PREC(PI2) : std::fmod(half_angle2,PREC(PI2));
// BUGFIX: clamp the angles, not the cosine caches. The original tested
// _inner_ha_cosine/_outer_ha_cosine here, but those still hold stale values
// at this point and, being cosines in [-1, 1], can never exceed pi anyway.
if (_inner_ha > PI)
_inner_ha = PI;
if (_outer_ha > PI)
_outer_ha = PI;
if (_outer_ha < _inner_ha)
{
auto tmp = _inner_ha;
_inner_ha = _outer_ha;
_outer_ha = tmp;
}
_inner_ha_cosine = std::cos(_inner_ha);
_outer_ha_cosine = std::cos(_outer_ha);
_multiplier = 1.0 / (_outer_ha_cosine - _inner_ha_cosine);
}
void BaseSpotLight::setFalloff(PREC const &falloff)
{
_falloff = falloff;
}
PX_CUDA_CALLABLE
PREC BaseSpotLight::attenuate(void *const &obj,
PREC const &x, PREC const &y, PREC const &z)
{
auto o = reinterpret_cast<BaseSpotLight*>(obj);
PREC dx = x-o->_pos.x;
PREC dy = y-o->_pos.y;
PREC dz = z-o->_pos.z;
PREC nrm2 = dx*dx + dy*dy + dz*dz;
if (nrm2 < EPSILON)
return FLT_MAX;
PREC nrm = std::sqrt(nrm2);
dx /= nrm;
dy /= nrm;
dz /= nrm;
PREC cosine = o->_dir.x * dx + o->_dir.y * dy + o->_dir.z * dz;
if (cosine >= o->_inner_ha_cosine)
return 1.0/nrm2;
if (cosine > o->_outer_ha_cosine)
return std::pow(((o->_outer_ha_cosine-cosine)*o->_multiplier), o->_falloff)/nrm2;
return 0;
}
Direction BaseSpotLight::dirFromHost(BaseSpotLight *const &obj,
PREC const &x, PREC const &y,
PREC const &z, PREC &dist)
{
auto dx = obj->_pos.x - x;
auto dy = obj->_pos.y - y;
auto dz = obj->_pos.z - z;
dist = std::sqrt(dx*dx + dy*dy + dz*dz);
return {dx, dy, dz};
}
PX_CUDA_CALLABLE
Direction BaseSpotLight::dirFromDevice(void *const &obj, PREC const &x,
PREC const &y, PREC const &z,
PREC &dist,
curandState_t * const &)
{
auto o = reinterpret_cast<BaseSpotLight*>(obj);
auto dx = o->_pos.x - x;
auto dy = o->_pos.y - y;
auto dz = o->_pos.z - z;
dist = std::sqrt(dx*dx + dy*dy + dz*dz);
return {dx, dy, dz};
}
// NOTE(review): a *spot* light is tagged LightType::PointLight here. If the
// LightType enum has a SpotLight enumerator this looks like a copy-paste
// slip — confirm against the enum definition before changing, since device
// dispatch may key off this value.
const LightType SpotLight::TYPE = LightType::PointLight;
std::shared_ptr<BaseLight> SpotLight::create(Light const &light,
Point const &pos,
Direction const &direction,
PREC const &half_angle1,
PREC const &half_angle2,
PREC const &falloff)
{
return std::shared_ptr<BaseLight>(new SpotLight(light, pos, direction,
half_angle1, half_angle2,
falloff));
}
SpotLight::SpotLight(Light const &light,
Point const &pos,
Direction const &direction,
PREC const &half_angle1,
PREC const &half_angle2,
PREC const &falloff)
: BaseLight(TYPE, light),
_obj(new BaseSpotLight(pos, direction,
half_angle1, half_angle2,
falloff)),
_gpu_obj(nullptr),
_need_upload(true)
{}
SpotLight::~SpotLight()
{
delete _obj;
#ifdef USE_CUDA
clearGpuData();
#endif
}
PREC SpotLight::attenuate(PREC const &x, PREC const &y, PREC const &z) const
{
return BaseSpotLight::attenuate(_obj, x, y, z);
}
Direction SpotLight::dirFrom(PREC const &x, PREC const &y, PREC const &z, PREC &dist) const
{
return BaseSpotLight::dirFromHost(_obj, x, y, z, dist);
}
void SpotLight::setPos(Point const &pos)
{
_obj->setPos(pos);
#ifdef USE_CUDA
_need_upload = true;
#endif
}
void SpotLight::setDir(Direction const &direction)
{
_obj->setDir(direction);
#ifdef USE_CUDA
_need_upload = true;
#endif
}
void SpotLight::setAngles(PREC const &half_angle1, PREC const &half_angle2)
{
_obj->setAngles(half_angle1, half_angle2);
#ifdef USE_CUDA
_need_upload = true;
#endif
}
void SpotLight::setFalloff(PREC const &falloff)
{
_obj->setFalloff(falloff);
#ifdef USE_CUDA
_need_upload = true;
#endif
}
#ifdef USE_CUDA
__device__ fnAttenuate_t __fn_attenuate_spot_light = BaseSpotLight::attenuate;
__device__ fnDirFrom_t __fn_dir_from_spot_light = BaseSpotLight::dirFromDevice;
#endif
// Uploads this light's state to the GPU if it (or the shared base-light
// state) is dirty. Device buffers for the BaseSpotLight payload and the
// LightObj wrapper are allocated lazily on first upload.
void SpotLight::up2Gpu()
{
#ifdef USE_CUDA
// Host-side copies of the __device__ function-pointer symbols, fetched
// once: a device function's address cannot be taken directly on the host.
static fnAttenuate_t fn_attenuate_h = nullptr;
static fnDirFrom_t fn_dir_from_h;
if (_need_upload || BaseLight::need_upload)
{
if (dev_ptr == nullptr)
{
PX_CUDA_CHECK(cudaMalloc(&_gpu_obj, sizeof(BaseSpotLight)));
PX_CUDA_CHECK(cudaMalloc(&dev_ptr, sizeof(LightObj)));
}
if (fn_attenuate_h == nullptr)
{
// read the device-side function pointers back to the host
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_attenuate_h, __fn_attenuate_spot_light, sizeof(fnAttenuate_t)));
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_dir_from_h, __fn_dir_from_spot_light, sizeof(fnDirFrom_t)));
}
// raw byte copy of the spot-light state — assumes BaseSpotLight is
// trivially copyable (no host-only pointers); appears to hold here
PX_CUDA_CHECK(cudaMemcpy(_gpu_obj, _obj, sizeof(BaseSpotLight),
cudaMemcpyHostToDevice));
// build the device-facing wrapper on the host, then push it across
LightObj tmp(_gpu_obj, type, _light, fn_attenuate_h, fn_dir_from_h);
PX_CUDA_CHECK(cudaMemcpy(dev_ptr, &tmp, sizeof(LightObj),
cudaMemcpyHostToDevice));
_need_upload = false;
BaseLight::need_upload = false;
}
#endif
}
void SpotLight::clearGpuData()
{
#ifdef USE_CUDA
if (_gpu_obj != nullptr)
{
PX_CUDA_CHECK(cudaFree(_gpu_obj));
_gpu_obj = nullptr;
}
BaseLight::clearGpuData();
#endif
}
|
770d1e31062e94b93993f768975ce469129cedd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "path_tracer/render.cuh"
#include <shared/scoped_timer.cuh>
#include <sstream>
namespace ppt
{
namespace path_tracer
{
using vec3 = ppt::shared::vec3;
using vec5 = ppt::shared::vec5;
using vec8 = ppt::shared::vec8;
#define RM(row, col, w) row* w + col
#define CM(row, col, h) col* h + row
render::render(int w, int h)
: w(w)
, h(h)
, render_color_bytes(w * h * sizeof(ppt::shared::vec3))
, render_image_bytes(w * h * sizeof(ppt::shared::vec8))
, sample_bytes(w * h * sizeof(unsigned int))
, variance_bytes(w * h * sizeof(float))
{
checkCudaErrors(hipMalloc((void**)&d_color_matrix, render_color_bytes));
checkCudaErrors(hipMalloc((void**)&d_image_matrix, render_image_bytes));
checkCudaErrors(hipMalloc((void**)&d_samples, sample_bytes));
checkCudaErrors(hipMalloc((void**)&d_variance, variance_bytes));
}
// Move constructor: takes ownership of `other`'s device buffers and copies
// its dimensions / byte sizes, then nulls out `other`'s pointers so its
// destructor will not free the transferred allocations.
//
// Fix: the previous version initialized w, h and the *_bytes members from
// themselves (e.g. `w(w)`), leaving them indeterminate, and never copied
// d_samples / d_variance into *this — so the destructor later called
// hipFree on garbage pointers.
render::render(render&& other)
    : d_image_matrix(other.d_image_matrix)
    , d_color_matrix(other.d_color_matrix)
    , w(other.w)
    , h(other.h)
    , render_color_bytes(other.render_color_bytes)
    , render_image_bytes(other.render_image_bytes)
    , sample_bytes(other.sample_bytes)
    , variance_bytes(other.variance_bytes)
{
    // assigned in the body so we don't depend on the (unseen) member
    // declaration order in the class definition
    d_samples = other.d_samples;
    d_variance = other.d_variance;
    other.d_color_matrix = nullptr;
    other.d_image_matrix = nullptr;
    other.d_samples = nullptr;
    other.d_variance = nullptr;
}
render::~render()
{
hipFree(d_color_matrix);
hipFree(d_image_matrix);
hipFree(d_samples);
hipFree(d_variance);
}
// Converts the packed vec8 framebuffer into a w*h vector of T, where T is
// constructed from each pixel's raw channel array (vec8::e) — e.g. vec3.
// Pixels are addressed row-major via the RM macro; `colors` must already be
// sized to at least w*h.
// NOTE(review): h_image_matrix is taken by value, copying the whole image on
// every call — consider a const reference if the T constructors accept
// const data.
template <typename T>
void get_vector_representation(std::vector<vec8> h_image_matrix, size_t w, size_t h, std::vector<T>& colors)
{
for (int i = 0; i < h; i++) // NOTE(review): int loop vs size_t bound (sign-compare)
{
for (int j = 0; j < w; j++)
{
const auto pixel_index = RM(i, j, w);
colors[pixel_index] = T(h_image_matrix[pixel_index].e);
}
}
}
std::vector<vec3> render::get_vector3_representation() const
{
auto colors = std::vector<vec3>(w * h);
auto h_image_matrix = get_vector8_representation();
get_vector_representation<vec3>(h_image_matrix, w, h, colors);
return colors;
}
std::vector<vec8> render::get_vector8_representation() const
{
auto h_image_matrix = std::vector<vec8>(w * h);
h_image_matrix.resize(w * h);
auto bytes = sizeof(vec8) * w * h;
checkCudaErrors(hipMemcpy(&h_image_matrix[0], d_image_matrix, bytes, hipMemcpyDeviceToHost));
return h_image_matrix;
}
// Serializes the image as 8-bit RGBA, reading source rows in reverse order
// (vertical flip) so row 0 of the output is the top of the image.
//
// Fix: channel values are now clamped to [0, 1] before quantization —
// converting an out-of-range float to unsigned char is undefined behaviour
// in C++, and rendered samples are not guaranteed to stay within [0, 1].
std::vector<unsigned char> render::get_byte_representation() const
{
    auto h_image_matrix = get_vector8_representation();
    auto result = std::vector<unsigned char>(h * w * 4);
    // clamp to [0, 1] then scale to a byte
    const auto to_byte = [](float v) {
        v = v < 0.f ? 0.f : (v > 1.f ? 1.f : v);
        return static_cast<unsigned char>(v * 255.f);
    };
    for (auto i = 0; i < h; i++)
    {
        for (auto j = 0; j < w; j++)
        {
            const auto idx = i * w * 4 + j * 4;
            // flipped row index: output is top-down
            const auto& e = h_image_matrix[(h - i - 1) * w + j];
            result[idx + 0] = to_byte(e[0]);
            result[idx + 1] = to_byte(e[1]);
            result[idx + 2] = to_byte(e[2]);
            result[idx + 3] = 255; // fully opaque alpha
        }
    }
    return result;
}
} // namespace path_tracer
} // namespace ppt | 770d1e31062e94b93993f768975ce469129cedd6.cu | #include "path_tracer/render.cuh"
#include <shared/scoped_timer.cuh>
#include <sstream>
namespace ppt
{
namespace path_tracer
{
using vec3 = ppt::shared::vec3;
using vec5 = ppt::shared::vec5;
using vec8 = ppt::shared::vec8;
#define RM(row, col, w) row* w + col
#define CM(row, col, h) col* h + row
render::render(int w, int h)
: w(w)
, h(h)
, render_color_bytes(w * h * sizeof(ppt::shared::vec3))
, render_image_bytes(w * h * sizeof(ppt::shared::vec8))
, sample_bytes(w * h * sizeof(unsigned int))
, variance_bytes(w * h * sizeof(float))
{
checkCudaErrors(cudaMalloc((void**)&d_color_matrix, render_color_bytes));
checkCudaErrors(cudaMalloc((void**)&d_image_matrix, render_image_bytes));
checkCudaErrors(cudaMalloc((void**)&d_samples, sample_bytes));
checkCudaErrors(cudaMalloc((void**)&d_variance, variance_bytes));
}
// Move constructor: takes ownership of `other`'s device buffers and copies
// its dimensions / byte sizes, then nulls out `other`'s pointers so its
// destructor will not free the transferred allocations.
//
// Fix: the previous version initialized w, h and the *_bytes members from
// themselves (e.g. `w(w)`), leaving them indeterminate, and never copied
// d_samples / d_variance into *this — so the destructor later called
// cudaFree on garbage pointers.
render::render(render&& other)
    : d_image_matrix(other.d_image_matrix)
    , d_color_matrix(other.d_color_matrix)
    , w(other.w)
    , h(other.h)
    , render_color_bytes(other.render_color_bytes)
    , render_image_bytes(other.render_image_bytes)
    , sample_bytes(other.sample_bytes)
    , variance_bytes(other.variance_bytes)
{
    // assigned in the body so we don't depend on the (unseen) member
    // declaration order in the class definition
    d_samples = other.d_samples;
    d_variance = other.d_variance;
    other.d_color_matrix = nullptr;
    other.d_image_matrix = nullptr;
    other.d_samples = nullptr;
    other.d_variance = nullptr;
}
render::~render()
{
cudaFree(d_color_matrix);
cudaFree(d_image_matrix);
cudaFree(d_samples);
cudaFree(d_variance);
}
// Converts the packed vec8 framebuffer into a w*h vector of T, where T is
// constructed from each pixel's raw channel array (vec8::e) — e.g. vec3.
// Pixels are addressed row-major via the RM macro; `colors` must already be
// sized to at least w*h.
// NOTE(review): h_image_matrix is taken by value, copying the whole image on
// every call — consider a const reference if the T constructors accept
// const data.
template <typename T>
void get_vector_representation(std::vector<vec8> h_image_matrix, size_t w, size_t h, std::vector<T>& colors)
{
for (int i = 0; i < h; i++) // NOTE(review): int loop vs size_t bound (sign-compare)
{
for (int j = 0; j < w; j++)
{
const auto pixel_index = RM(i, j, w);
colors[pixel_index] = T(h_image_matrix[pixel_index].e);
}
}
}
std::vector<vec3> render::get_vector3_representation() const
{
auto colors = std::vector<vec3>(w * h);
auto h_image_matrix = get_vector8_representation();
get_vector_representation<vec3>(h_image_matrix, w, h, colors);
return colors;
}
std::vector<vec8> render::get_vector8_representation() const
{
auto h_image_matrix = std::vector<vec8>(w * h);
h_image_matrix.resize(w * h);
auto bytes = sizeof(vec8) * w * h;
checkCudaErrors(cudaMemcpy(&h_image_matrix[0], d_image_matrix, bytes, cudaMemcpyDeviceToHost));
return h_image_matrix;
}
// Serializes the image as 8-bit RGBA, reading source rows in reverse order
// (vertical flip) so row 0 of the output is the top of the image.
//
// Fix: channel values are now clamped to [0, 1] before quantization —
// converting an out-of-range float to unsigned char is undefined behaviour
// in C++, and rendered samples are not guaranteed to stay within [0, 1].
std::vector<unsigned char> render::get_byte_representation() const
{
    auto h_image_matrix = get_vector8_representation();
    auto result = std::vector<unsigned char>(h * w * 4);
    // clamp to [0, 1] then scale to a byte
    const auto to_byte = [](float v) {
        v = v < 0.f ? 0.f : (v > 1.f ? 1.f : v);
        return static_cast<unsigned char>(v * 255.f);
    };
    for (auto i = 0; i < h; i++)
    {
        for (auto j = 0; j < w; j++)
        {
            const auto idx = i * w * 4 + j * 4;
            // flipped row index: output is top-down
            const auto& e = h_image_matrix[(h - i - 1) * w + j];
            result[idx + 0] = to_byte(e[0]);
            result[idx + 1] = to_byte(e[1]);
            result[idx + 2] = to_byte(e[2]);
            result[idx + 3] = 255; // fully opaque alpha
        }
    }
    return result;
}
} // namespace path_tracer
} // namespace ppt |
5b2da53781fed80c2338e92662709a10206a2f28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* --------------------------------- Header --------------------------------- */
/**
* @file shapecontainer.cpp
* @brief Set container of shape pointers
*/
/* -------------------------------- Includes -------------------------------- */
# include <algorithm>
# include <chrono>
# include <set>
# include <sstream>
# include <hip/driver_types.h>
# include "cudaerr.cuh"
# include "shape.h"
# include "shapecontainer.h"
# include "triangle.h"
/* ----------------------- Constructors / Destructors ----------------------- */
const unsigned int SHAPE_DIM = 3;
const unsigned int VERT_DIM = 4;
/* ----------------------- Constructors / Destructors ----------------------- */
/**
* @brief Creates an empty shape container
*
* @param void
*
* @return The created shape container
*/
ShapeContainer::ShapeContainer() = default;
/**
* @brief Creates a shape container from an existing shape container
*
* @param &sc The shape container to create from
*
* @return The created shape container
*/
ShapeContainer::ShapeContainer( const ShapeContainer &sc )
{
add( sc );
}
/**
* @brief Shape container destructor
*
* @param void
*
* @return void
*/
ShapeContainer::~ShapeContainer()
{
std::for_each( shapes.begin(), shapes.end(), [this]( Shape *shape )
{
delete shape;
}
);
}
/* -------------------------- Overloaded Operators -------------------------- */
/**
* @brief Assigns the shapes from another shape container to this
* shape container
*
* @param &sc The shape container to assign from
*
* @return This shape container
*/
ShapeContainer &ShapeContainer::operator=( const ShapeContainer &sc )
{
erase();
add( sc );
return *this;
}
/**
* @brief Converts a shape container to a string and writes it to an
* output stream
*
* @param &os The output stream to write to
* @param &sc The shape container to convert
*
* @return The output stream
*/
std::ostream &operator<<( std::ostream &os, const ShapeContainer &sc )
{
sc.out( os );
return os;
}
/* ---------------------------- Public Functions ---------------------------- */
/**
* Pushes this shape container to the GPU device
*/
void ShapeContainer::pushToDevice()
{
// free existing device mallocs if they exist
if ( d_inputShapes != nullptr)
{
HANDLE_CUDA_ERROR(hipFree(d_inputShapes));
}
if ( d_outputShapes != nullptr)
{
HANDLE_CUDA_ERROR(hipFree(d_outputShapes));
}
// malloc new input and output
HANDLE_CUDA_ERROR(
hipMalloc(&d_inputShapes, shapes.size() * SHAPE_DIM * VERT_DIM * sizeof(float))
);
HANDLE_CUDA_ERROR(
hipMalloc(&d_outputShapes, shapes.size() * SHAPE_DIM * VERT_DIM * sizeof(float))
);
// copy each tri to input memory
for (unsigned int shapeIdx = 0; shapeIdx < shapes.size(); shapeIdx++)
{
float shape[SHAPE_DIM][VERT_DIM];
unsigned int shapeOffset = shapeIdx * SHAPE_DIM * VERT_DIM;
for (unsigned int vertIdx = 0; vertIdx < SHAPE_DIM; vertIdx++)
{
for (unsigned int coordIdx = 0; coordIdx < VERT_DIM; coordIdx++)
{
if (coordIdx < VERT_DIM - 1)
{
shape[vertIdx][coordIdx] = (*(shapes[shapeIdx]))[vertIdx][coordIdx];
}
else
{
shape[vertIdx][coordIdx] = 1;
}
}
}
HANDLE_CUDA_ERROR(
hipMemcpy(
(void *) (d_inputShapes + shapeOffset),
(void *) shape,
SHAPE_DIM * VERT_DIM * sizeof(float),
hipMemcpyHostToDevice
)
);
}
}
/**
* @brief Adds a shape to this shape container
*
* @param &shape The shape to add
*
* @return void
*/
void ShapeContainer::add( const Shape &shape )
{
shapes.insert( shapes.end(), shape.clone() );
}
/**
* @brief Adds the elements from another shape container to this
* shape container
*
* @param &sc The shape container to add
*
* @return void
*/
void ShapeContainer::add( const ShapeContainer &sc )
{
std::for_each( sc.shapes.begin(), sc.shapes.end(), [this]( Shape *shape )
{
add( *shape );
}
);
}
/**
 * @brief Draws the shapes in this shape container
 *
 * Uploads the current view transform, applies it to every shape on the GPU,
 * copies the transformed vertices back to the host, and draws the resulting
 * triangles. Transform and total draw timings are printed.
 *
 * Fix: the temporary Triangle objects built from the kernel output were
 * previously never freed, leaking memory on every draw() call.
 *
 * @param *gc The graphics context to draw to
 * @param *vc The view context to draw with
 *
 * @return void
 */
void ShapeContainer::draw( GraphicsContext *gc, ViewContext *vc ) const
{
    auto drawStartTime = std::chrono::high_resolution_clock::now();
    // copy view transform to a local, contiguous array
    float viewTransform[VERT_DIM][VERT_DIM];
    for (unsigned int row = 0; row < VERT_DIM; row++)
    {
        for (unsigned int col = 0; col < VERT_DIM; col++)
        {
            viewTransform[row][col] = ViewContext::transform[row][col];
        }
    }
    // copy view transform to device
    HANDLE_CUDA_ERROR(
        hipMemcpy(
            ( void * ) ViewContext::d_viewTransform,
            ( void * ) viewTransform,
            VERT_DIM * VERT_DIM * sizeof( float ),
            hipMemcpyHostToDevice
        )
    );
    // zero the output matrix: the kernel accumulates into it with +=
    HANDLE_CUDA_ERROR(
        hipMemset(d_outputShapes, 0, shapes.size() * SHAPE_DIM * VERT_DIM * sizeof(float))
    );
    // run GPU kernel, one thread per shape
    // NOTE(review): applyViewTransform has no bounds check, so when
    // shapes.size() is not a multiple of 1024 the trailing threads index past
    // the end of the device buffers — confirm the buffers are padded, or give
    // the kernel a shape-count parameter.
    unsigned int blocks = ceil(shapes.size() / 1024.0);
    hipLaunchKernelGGL(( applyViewTransform), dim3(blocks), dim3(1024), 0, 0,
        d_inputShapes,
        d_outputShapes,
        ViewContext::d_viewTransform
    );
    HANDLE_CUDA_ERROR(hipDeviceSynchronize());
    // parse output points into output shapes vector
    std::vector<Shape*> parsedShapes;
    for (unsigned int shapeIdx = 0; shapeIdx < shapes.size(); shapeIdx++)
    {
        float shape[SHAPE_DIM][VERT_DIM];
        unsigned int shapeOffset = shapeIdx * SHAPE_DIM * VERT_DIM;
        HANDLE_CUDA_ERROR(
            hipMemcpy(
                (void *) shape,
                (void *) (d_outputShapes + shapeOffset),
                SHAPE_DIM * VERT_DIM * sizeof(float),
                hipMemcpyDeviceToHost
            )
        );
        Point3D verts[3];
        for (unsigned int vertIdx = 0; vertIdx < SHAPE_DIM; vertIdx++)
        {
            verts[vertIdx].setX(shape[vertIdx][0]);
            verts[vertIdx].setY(shape[vertIdx][1]);
            verts[vertIdx].setZ(shape[vertIdx][2]);
        }
        Triangle * tri = new Triangle(verts[0], verts[1], verts[2]);
        parsedShapes.insert(parsedShapes.end(), tri);
    }
    auto transformEndTime = std::chrono::high_resolution_clock::now();
    double transformTime = std::chrono::duration_cast<std::chrono::nanoseconds>(
        transformEndTime - drawStartTime
    ).count() / 1000000.0;
    std::cout << "Transform Time: " << transformTime << "ms" << std::endl;
    // draw shapes
    std::for_each(parsedShapes.begin(), parsedShapes.end(), [gc](Shape *shape)
    {
        shape->draw(gc);
    }
    );
    auto drawEndTime = std::chrono::high_resolution_clock::now();
    double drawTime = std::chrono::duration_cast<std::chrono::nanoseconds>(
        drawEndTime - drawStartTime
    ).count() / 1000000.0;
    std::cout << "Total Draw Time: " << drawTime << "ms" << std::endl;
    // free the transformed copies (previously leaked on every call)
    std::for_each(parsedShapes.begin(), parsedShapes.end(), []( Shape *shape )
    {
        delete shape;
    }
    );
}
/**
* @brief Converts the shapes in this shape container to strings and
* outputs them to an output stream
*
* @param &os The output stream to write to
*
* @return The output stream
*/
std::ostream &ShapeContainer::out( std::ostream &os ) const
{
std::for_each( shapes.begin(), shapes.end(), [&os]( Shape *shape )
{
shape->out( const_cast<std::ostream&>( os ) );
os << std::endl;
}
);
return os;
}
/**
* @brief Removes all shapes from this shape container
*
* @param void
*
* @return void
*/
void ShapeContainer::erase()
{
std::for_each( shapes.begin(), shapes.end(), []( Shape *shape )
{
delete shape;
}
);
shapes.clear();
}
/**
* @brief Gets the size of the shape container
*
* @param void
*
* @return The size of the shape container
*/
unsigned int ShapeContainer::size()
{
return shapes.size();
}
/* ------------------------------ GPU Kernels ------------------------------- */
// Applies the 4x4 view transform to every vertex of every shape.
// One thread per shape; each shape is SHAPE_DIM (3) homogeneous vertices of
// VERT_DIM (4) floats stored contiguously. outputShapes must be zeroed by
// the caller beforehand because results are accumulated with +=.
// viewTransform is a row-major 4x4 matrix in device memory.
// NOTE(review): there is no bounds check on shapeIdx — the launch must not
// exceed the number of shapes backing the buffers, or the buffers must be
// padded up to a multiple of the launch size; confirm with the caller.
__global__ void applyViewTransform(
float * inputShapes, float * outputShapes, float * viewTransform
)
{
unsigned int shapeIdx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int shapeOffset = shapeIdx * SHAPE_DIM * VERT_DIM;
// 4x4 matrix-vector multiplication for each of the shape's vertices
for (unsigned int vertIdx = 0; vertIdx < SHAPE_DIM; vertIdx++)
{
unsigned int vertOffset = vertIdx * VERT_DIM;
for (unsigned int row = 0; row < VERT_DIM; row++)
{
unsigned int rowOffset = row * VERT_DIM;
for (unsigned int i = 0; i < VERT_DIM; i++)
{
outputShapes[shapeOffset + vertOffset + row] +=
viewTransform[rowOffset + i] * inputShapes[shapeOffset + vertOffset + i];
}
}
}
}
/* -------------------------------------------------------------------------- */ | 5b2da53781fed80c2338e92662709a10206a2f28.cu | /* --------------------------------- Header --------------------------------- */
/**
* @file shapecontainer.cpp
* @brief Set container of shape pointers
*/
/* -------------------------------- Includes -------------------------------- */
# include <algorithm>
# include <chrono>
# include <set>
# include <sstream>
# include <driver_types.h>
# include "cudaerr.cuh"
# include "shape.h"
# include "shapecontainer.h"
# include "triangle.h"
/* ----------------------- Constructors / Destructors ----------------------- */
const unsigned int SHAPE_DIM = 3;
const unsigned int VERT_DIM = 4;
/* ----------------------- Constructors / Destructors ----------------------- */
/**
* @brief Creates an empty shape container
*
* @param void
*
* @return The created shape container
*/
ShapeContainer::ShapeContainer() = default;
/**
* @brief Creates a shape container from an existing shape container
*
* @param &sc The shape container to create from
*
* @return The created shape container
*/
ShapeContainer::ShapeContainer( const ShapeContainer &sc )
{
add( sc );
}
/**
* @brief Shape container destructor
*
* @param void
*
* @return void
*/
ShapeContainer::~ShapeContainer()
{
std::for_each( shapes.begin(), shapes.end(), [this]( Shape *shape )
{
delete shape;
}
);
}
/* -------------------------- Overloaded Operators -------------------------- */
/**
* @brief Assigns the shapes from another shape container to this
* shape container
*
* @param &sc The shape container to assign from
*
* @return This shape container
*/
ShapeContainer &ShapeContainer::operator=( const ShapeContainer &sc )
{
erase();
add( sc );
return *this;
}
/**
* @brief Converts a shape container to a string and writes it to an
* output stream
*
* @param &os The output stream to write to
* @param &sc The shape container to convert
*
* @return The output stream
*/
std::ostream &operator<<( std::ostream &os, const ShapeContainer &sc )
{
sc.out( os );
return os;
}
/* ---------------------------- Public Functions ---------------------------- */
/**
* Pushes this shape container to the GPU device
*/
void ShapeContainer::pushToDevice()
{
// free existing device mallocs if they exist
if ( d_inputShapes != nullptr)
{
HANDLE_CUDA_ERROR(cudaFree(d_inputShapes));
}
if ( d_outputShapes != nullptr)
{
HANDLE_CUDA_ERROR(cudaFree(d_outputShapes));
}
// malloc new input and output
HANDLE_CUDA_ERROR(
cudaMalloc(&d_inputShapes, shapes.size() * SHAPE_DIM * VERT_DIM * sizeof(float))
);
HANDLE_CUDA_ERROR(
cudaMalloc(&d_outputShapes, shapes.size() * SHAPE_DIM * VERT_DIM * sizeof(float))
);
// copy each tri to input memory
for (unsigned int shapeIdx = 0; shapeIdx < shapes.size(); shapeIdx++)
{
float shape[SHAPE_DIM][VERT_DIM];
unsigned int shapeOffset = shapeIdx * SHAPE_DIM * VERT_DIM;
for (unsigned int vertIdx = 0; vertIdx < SHAPE_DIM; vertIdx++)
{
for (unsigned int coordIdx = 0; coordIdx < VERT_DIM; coordIdx++)
{
if (coordIdx < VERT_DIM - 1)
{
shape[vertIdx][coordIdx] = (*(shapes[shapeIdx]))[vertIdx][coordIdx];
}
else
{
shape[vertIdx][coordIdx] = 1;
}
}
}
HANDLE_CUDA_ERROR(
cudaMemcpy(
(void *) (d_inputShapes + shapeOffset),
(void *) shape,
SHAPE_DIM * VERT_DIM * sizeof(float),
cudaMemcpyHostToDevice
)
);
}
}
/**
* @brief Adds a shape to this shape container
*
* @param &shape The shape to add
*
* @return void
*/
void ShapeContainer::add( const Shape &shape )
{
shapes.insert( shapes.end(), shape.clone() );
}
/**
* @brief Adds the elements from another shape container to this
* shape container
*
* @param &sc The shape container to add
*
* @return void
*/
void ShapeContainer::add( const ShapeContainer &sc )
{
std::for_each( sc.shapes.begin(), sc.shapes.end(), [this]( Shape *shape )
{
add( *shape );
}
);
}
/**
 * @brief Draws the shapes in this shape container
 *
 * Uploads the current view transform, applies it to every shape on the GPU,
 * copies the transformed vertices back to the host, and draws the resulting
 * triangles. Transform and total draw timings are printed.
 *
 * Fix: the temporary Triangle objects built from the kernel output were
 * previously never freed, leaking memory on every draw() call.
 *
 * @param *gc The graphics context to draw to
 * @param *vc The view context to draw with
 *
 * @return void
 */
void ShapeContainer::draw( GraphicsContext *gc, ViewContext *vc ) const
{
    auto drawStartTime = std::chrono::high_resolution_clock::now();
    // copy view transform to a local, contiguous array
    float viewTransform[VERT_DIM][VERT_DIM];
    for (unsigned int row = 0; row < VERT_DIM; row++)
    {
        for (unsigned int col = 0; col < VERT_DIM; col++)
        {
            viewTransform[row][col] = ViewContext::transform[row][col];
        }
    }
    // copy view transform to device
    HANDLE_CUDA_ERROR(
        cudaMemcpy(
            ( void * ) ViewContext::d_viewTransform,
            ( void * ) viewTransform,
            VERT_DIM * VERT_DIM * sizeof( float ),
            cudaMemcpyHostToDevice
        )
    );
    // zero the output matrix: the kernel accumulates into it with +=
    HANDLE_CUDA_ERROR(
        cudaMemset(d_outputShapes, 0, shapes.size() * SHAPE_DIM * VERT_DIM * sizeof(float))
    );
    // run GPU kernel, one thread per shape
    // NOTE(review): applyViewTransform has no bounds check, so when
    // shapes.size() is not a multiple of 1024 the trailing threads index past
    // the end of the device buffers — confirm the buffers are padded, or give
    // the kernel a shape-count parameter.
    unsigned int blocks = ceil(shapes.size() / 1024.0);
    applyViewTransform<<<blocks, 1024>>>(
        d_inputShapes,
        d_outputShapes,
        ViewContext::d_viewTransform
    );
    HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
    // parse output points into output shapes vector
    std::vector<Shape*> parsedShapes;
    for (unsigned int shapeIdx = 0; shapeIdx < shapes.size(); shapeIdx++)
    {
        float shape[SHAPE_DIM][VERT_DIM];
        unsigned int shapeOffset = shapeIdx * SHAPE_DIM * VERT_DIM;
        HANDLE_CUDA_ERROR(
            cudaMemcpy(
                (void *) shape,
                (void *) (d_outputShapes + shapeOffset),
                SHAPE_DIM * VERT_DIM * sizeof(float),
                cudaMemcpyDeviceToHost
            )
        );
        Point3D verts[3];
        for (unsigned int vertIdx = 0; vertIdx < SHAPE_DIM; vertIdx++)
        {
            verts[vertIdx].setX(shape[vertIdx][0]);
            verts[vertIdx].setY(shape[vertIdx][1]);
            verts[vertIdx].setZ(shape[vertIdx][2]);
        }
        Triangle * tri = new Triangle(verts[0], verts[1], verts[2]);
        parsedShapes.insert(parsedShapes.end(), tri);
    }
    auto transformEndTime = std::chrono::high_resolution_clock::now();
    double transformTime = std::chrono::duration_cast<std::chrono::nanoseconds>(
        transformEndTime - drawStartTime
    ).count() / 1000000.0;
    std::cout << "Transform Time: " << transformTime << "ms" << std::endl;
    // draw shapes
    std::for_each(parsedShapes.begin(), parsedShapes.end(), [gc](Shape *shape)
    {
        shape->draw(gc);
    }
    );
    auto drawEndTime = std::chrono::high_resolution_clock::now();
    double drawTime = std::chrono::duration_cast<std::chrono::nanoseconds>(
        drawEndTime - drawStartTime
    ).count() / 1000000.0;
    std::cout << "Total Draw Time: " << drawTime << "ms" << std::endl;
    // free the transformed copies (previously leaked on every call)
    std::for_each(parsedShapes.begin(), parsedShapes.end(), []( Shape *shape )
    {
        delete shape;
    }
    );
}
/**
* @brief Converts the shapes in this shape container to strings and
* outputs them to an output stream
*
* @param &os The output stream to write to
*
* @return The output stream
*/
std::ostream &ShapeContainer::out( std::ostream &os ) const
{
std::for_each( shapes.begin(), shapes.end(), [&os]( Shape *shape )
{
shape->out( const_cast<std::ostream&>( os ) );
os << std::endl;
}
);
return os;
}
/**
* @brief Removes all shapes from this shape container
*
* @param void
*
* @return void
*/
void ShapeContainer::erase()
{
std::for_each( shapes.begin(), shapes.end(), []( Shape *shape )
{
delete shape;
}
);
shapes.clear();
}
/**
* @brief Gets the size of the shape container
*
* @param void
*
* @return The size of the shape container
*/
unsigned int ShapeContainer::size()
{
return shapes.size();
}
/* ------------------------------ GPU Kernels ------------------------------- */
// Applies the 4x4 view transform to every vertex of every shape.
// One thread per shape; each shape is SHAPE_DIM (3) homogeneous vertices of
// VERT_DIM (4) floats stored contiguously. outputShapes must be zeroed by
// the caller beforehand because results are accumulated with +=.
// viewTransform is a row-major 4x4 matrix in device memory.
// NOTE(review): there is no bounds check on shapeIdx — the launch must not
// exceed the number of shapes backing the buffers, or the buffers must be
// padded up to a multiple of the launch size; confirm with the caller.
__global__ void applyViewTransform(
float * inputShapes, float * outputShapes, float * viewTransform
)
{
unsigned int shapeIdx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int shapeOffset = shapeIdx * SHAPE_DIM * VERT_DIM;
// 4x4 matrix-vector multiplication for each of the shape's vertices
for (unsigned int vertIdx = 0; vertIdx < SHAPE_DIM; vertIdx++)
{
unsigned int vertOffset = vertIdx * VERT_DIM;
for (unsigned int row = 0; row < VERT_DIM; row++)
{
unsigned int rowOffset = row * VERT_DIM;
for (unsigned int i = 0; i < VERT_DIM; i++)
{
outputShapes[shapeOffset + vertOffset + row] +=
viewTransform[rowOffset + i] * inputShapes[shapeOffset + vertOffset + i];
}
}
}
}
/* -------------------------------------------------------------------------- */ |
7a37965c7cafa5cd5c989dff795f6cf2b0603cf4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_cuda.h>
#include <math.h>
#if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// CUDA standard includes
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
#include "bodysystem.h"
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
hipError_t setSofteningSquared(float softeningSq)
{
return hipMemcpyToSymbol(softeningSquared,
&softeningSq,
sizeof(float), 0,
hipMemcpyHostToDevice);
}
hipError_t setSofteningSquared(double softeningSq)
{
return hipMemcpyToSymbol(softeningSquared_fp64,
&softeningSq,
sizeof(double), 0,
hipMemcpyHostToDevice);
}
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// Reciprocal square root, 1/sqrt(x). The previous trailing comments said
// "square root" (and labelled the generic template "half precision"), which
// was misleading — these all compute the reciprocal.
template<typename T>
__device__ T rsqrt_T(T x) // reciprocal square root - generic fallback
{
return rsqrt(x);
}
template<>
__device__ float rsqrt_T<float>(float x) // reciprocal square root - single precision
{
return rsqrtf(x);
}
template<>
__device__ double rsqrt_T<double>(double x) // reciprocal square root - double precision
{
return rsqrt(x);
}
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
template <typename T>
__device__ T getSofteningSquared() // epsilon squared - single precision
{
return softeningSquared;
}
template <>
__device__ double getSofteningSquared<double>() // epsilon squared - double precision
{
return softeningSquared_fp64;
}
template <typename T>
struct DeviceData
{
T *dPos[2]; // mapped host pointers
T *dVel;
hipEvent_t event;
unsigned int offset;
unsigned int numBodies;
};
// Accumulates into `ai` the gravitational acceleration that body bj exerts
// on body bi. Positions ride in .x/.y/.z and the mass in .w of each vec4;
// the softening term keeps the interaction finite as the pair distance goes
// to zero. Body bi's own mass is factored out by the caller (it cancels), so
// the result really is acceleration, not force.
template <typename T>
__device__ typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai, // accumulates one pair-wise interaction
typename vec4<T>::Type bi,
typename vec4<T>::Type bj)
{
typename vec3<T>::Type r;
// distance r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
distSqr += getSofteningSquared<T>();
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
T invDist = rsqrt_T(distSqr);
T invDistCube = invDist * invDist * invDist;
// s = m_j * invDistCube [1 FLOP]
T s = bj.w * invDistCube;
// a_i = a_i + s * r_ij [6 FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
return ai; // acceleration
}
template <typename T>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
typename vec4<T>::Type *positions,
int numTiles, cg::thread_block cta)
{
typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
for (int tile = 0; tile < numTiles; tile++)
{
sharedPos[threadIdx.x] = positions[tile * blockDim.x + threadIdx.x];
cg::sync(cta);
// This is the "tile_calculation" from the text along with the unrolling optimizations.
#pragma unroll 128
for (unsigned int counter = 0; counter < blockDim.x; counter++)
{
acc = bodyBodyInteraction<T>(acc, bodyPos, sharedPos[counter]);
}
cg::sync(cta);
}
return acc;
}
// Advances every body by one time step of size deltaTime. One thread per
// body: load the body's position, compute its total acceleration over all
// bodies via the shared-memory tiling in computeBodyAccel, kick the
// velocity, drift the position, kick again, then apply the damping factor
// and write the results back.
// NOTE(review): both half-kicks reuse the acceleration evaluated at the OLD
// positions, which is equivalent to a single full kick followed by the
// drift — not a textbook velocity-Verlet step (that would recompute the
// acceleration after the drift). Confirm this is the intended scheme.
template<typename T>
__global__ void
integrateBodies(typename vec4<T>::Type *__restrict__ newPos, // leapfrog-style update of positions and velocities
typename vec4<T>::Type *__restrict__ oldPos,
typename vec4<T>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int numTiles)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
int index = blockIdx.x * blockDim.x + threadIdx.x;
// grid tail guard: the grid may be larger than this device's body count
if (index >= deviceNumBodies)
{
return;
}
typename vec4<T>::Type position = oldPos[deviceOffset + index];
typename vec3<T>::Type accel = computeBodyAccel<T>(position,
oldPos,
numTiles, cta);
// acceleration = force / mass;
// new velocity = old velocity + acceleration * deltaTime
// note we factor out the body's mass from the equation, here and in bodyBodyInteraction
// (because they cancel out). Thus here force == acceleration
typename vec4<T>::Type velocity = vel[deviceOffset + index];
velocity.x += accel.x * (deltaTime / 2); // v = v0 + a * (dt/2)
velocity.y += accel.y * (deltaTime / 2);
velocity.z += accel.z * (deltaTime / 2);
// new position = old position + velocity * deltaTime
position.x += velocity.x * deltaTime; // x = x0 + v * t
position.y += velocity.y * deltaTime;
position.z += velocity.z * deltaTime;
velocity.x += accel.x * (deltaTime / 2); // v = v0 + a * (dt/2)
velocity.y += accel.y * (deltaTime / 2);
velocity.z += accel.z * (deltaTime / 2);
// damping gradually bleeds energy out of the system
velocity.x *= damping;
velocity.y *= damping;
velocity.z *= damping;
// store new position and velocity
newPos[deviceOffset + index] = position;
vel[deviceOffset + index] = velocity;
}
template <typename T>
void integrateNbodySystem(DeviceData<T> *deviceData,
cudaGraphicsResource **pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int blockSize,
bool bUsePBO)
{
if (bUsePBO)
{
checkCudaErrors(hipGraphicsResourceSetMapFlags(pgres[currentRead], hipGraphicsMapFlagsReadOnly));
checkCudaErrors(hipGraphicsResourceSetMapFlags(pgres[1-currentRead], hipGraphicsMapFlagsWriteDiscard));
checkCudaErrors(hipGraphicsMapResources(2, pgres, 0));
size_t bytes;
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[currentRead]), &bytes, pgres[currentRead]));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[1-currentRead]), &bytes, pgres[1-currentRead]));
}
for (unsigned int dev = 0; dev != numDevices; dev++)
{
if (numDevices > 1)
{
hipSetDevice(dev);
}
int numBlocks = (deviceData[dev].numBodies + blockSize-1) / blockSize; // this parameter controls the number of thread blocks to be assigned
int numTiles = (numBodies + blockSize - 1) / blockSize; // this paramter controls the granularity at the tile level (parameter 'p')
int sharedMemSize = blockSize * 4 * sizeof(T); // 4 floats for pos
hipLaunchKernelGGL(( integrateBodies<T>), dim3(numBlocks), dim3(blockSize), sharedMemSize , 0,
(typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead],
(typename vec4<T>::Type *)deviceData[dev].dPos[currentRead],
(typename vec4<T>::Type *)deviceData[dev].dVel,
deviceData[dev].offset, deviceData[dev].numBodies,
deltaTime, damping, numTiles);
if (numDevices > 1)
{
checkCudaErrors(hipEventRecord(deviceData[dev].event));
// MJH: Hack on older driver versions to force kernel launches to flush!
hipStreamQuery(0);
}
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
if (numDevices > 1)
{
for (unsigned int dev = 0; dev < numDevices; dev++)
{
checkCudaErrors(hipEventSynchronize(deviceData[dev].event));
}
}
if (bUsePBO)
{
checkCudaErrors(hipGraphicsUnmapResources(2, pgres, 0));
}
}
// Explicit instantiations for the two supported precisions; without these,
// the template definitions in this translation unit would emit no code for
// callers linking against it.
template void integrateNbodySystem<float>(DeviceData<float> *deviceData,
                                          cudaGraphicsResource **pgres,
                                          unsigned int currentRead,
                                          float deltaTime,
                                          float damping,
                                          unsigned int numBodies,
                                          unsigned int numDevices,
                                          int blockSize,
                                          bool bUsePBO);
template void integrateNbodySystem<double>(DeviceData<double> *deviceData,
                                           cudaGraphicsResource **pgres,
                                           unsigned int currentRead,
                                           float deltaTime,
                                           float damping,
                                           unsigned int numBodies,
                                           unsigned int numDevices,
                                           int blockSize,
                                           bool bUsePBO);
| 7a37965c7cafa5cd5c989dff795f6cf2b0603cf4.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_cuda.h>
#include <math.h>
#if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// CUDA standard includes
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include "bodysystem.h"
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
// Upload the single-precision softening factor (epsilon^2) into the device's
// __constant__ memory so kernels can read it via getSofteningSquared<float>().
// Returns the cudaMemcpyToSymbol status.
cudaError_t setSofteningSquared(float softeningSq)
{
    const size_t bytes = sizeof(float);
    const size_t offset = 0;
    return cudaMemcpyToSymbol(softeningSquared, &softeningSq, bytes, offset,
                              cudaMemcpyHostToDevice);
}
// Upload the double-precision softening factor (epsilon^2) into the device's
// __constant__ memory so kernels can read it via getSofteningSquared<double>().
// Returns the cudaMemcpyToSymbol status.
cudaError_t setSofteningSquared(double softeningSq)
{
    const size_t bytes = sizeof(double);
    const size_t offset = 0;
    return cudaMemcpyToSymbol(softeningSquared_fp64, &softeningSq, bytes, offset,
                              cudaMemcpyHostToDevice);
}
// Aliases the dynamically sized `extern __shared__` allocation as an array of
// T. Every instantiation names the same underlying buffer (__smem), which is
// why it is declared once as int[] and cast — otherwise each T would declare a
// conflicting extern symbol.
template<class T>
struct SharedMemory
{
    __device__ inline operator T *()
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }
    __device__ inline operator const T *() const
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }
};
// Reciprocal square root 1/sqrt(x) — generic fallback template.
// (The original comment said "square root - half precision"; this is neither:
// it computes rsqrt and is the non-specialized version.)
template<typename T>
__device__ T rsqrt_T(T x)
{
    return rsqrt(x);
}
// Reciprocal square root 1/sqrt(x) — single-precision specialization.
template<>
__device__ float rsqrt_T<float>(float x)
{
    return rsqrtf(x);
}
// Reciprocal square root 1/sqrt(x) — double-precision specialization.
template<>
__device__ double rsqrt_T<double>(double x)
{
    return rsqrt(x);
}
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
// Read the softening factor epsilon^2 from constant memory.
// Generic template returns the single-precision value; only float and double
// (specialized below) are instantiated in this file.
template <typename T>
__device__ T getSofteningSquared()
{
    return softeningSquared;
}
// Double-precision specialization: reads the fp64 constant-memory copy.
template <>
__device__ double getSofteningSquared<double>()
{
    return softeningSquared_fp64;
}
// Per-GPU state for one device's slice of the simulation.
template <typename T>
struct DeviceData
{
    T *dPos[2]; // double-buffered position arrays (mapped host pointers)
    T *dVel;    // velocity array for this device's bodies
    cudaEvent_t event;      // records kernel completion for multi-GPU sync
    unsigned int offset;    // index of this device's first body in the global arrays
    unsigned int numBodies; // number of bodies this device integrates
};
// Accumulate into `ai` the gravitational acceleration exerted on body `bi` by
// body `bj`. The .w component of bj is its mass; the softening term epsilon^2
// keeps the force finite when the two bodies coincide.
template <typename T>
__device__ typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
                    typename vec4<T>::Type bi,
                    typename vec4<T>::Type bj)
{
    typename vec3<T>::Type r;
    // distance r_ij [3 FLOPS]
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;
    // distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
    T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
    distSqr += getSofteningSquared<T>();
    // invDistCube = 1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
    T invDist = rsqrt_T(distSqr);
    T invDistCube = invDist * invDist * invDist;
    // s = m_j * invDistCube [1 FLOP]
    T s = bj.w * invDistCube;
    // a_i = a_i + s * r_ij [6 FLOPS]
    ai.x += r.x * s;
    ai.y += r.y * s;
    ai.z += r.z * s;
    return ai; // acceleration accumulated so far
}
// Total acceleration on one body from all bodies in `positions`, computed
// tile by tile: each tile of blockDim.x positions is staged through shared
// memory, and every thread accumulates interactions with the whole tile.
// NOTE(review): positions[tile * blockDim.x + threadIdx.x] is loaded with no
// bounds check, so the position array is assumed to be padded to a multiple
// of blockDim.x — confirm against the caller's allocation.
template <typename T>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
                 typename vec4<T>::Type *positions,
                 int numTiles, cg::thread_block cta)
{
    typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
    typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
    for (int tile = 0; tile < numTiles; tile++)
    {
        // Stage one tile of positions into shared memory (one element per thread).
        sharedPos[threadIdx.x] = positions[tile * blockDim.x + threadIdx.x];
        cg::sync(cta); // tile fully loaded before anyone reads it
        // This is the "tile_calculation" from the text along with the unrolling optimizations.
#pragma unroll 128
        for (unsigned int counter = 0; counter < blockDim.x; counter++)
        {
            acc = bodyBodyInteraction<T>(acc, bodyPos, sharedPos[counter]);
        }
        cg::sync(cta); // tile fully consumed before it is overwritten
    }
    return acc;
}
// Velocity-Verlet style update: half-kick, drift, half-kick, then damping.
// newPos receives the updated positions; oldPos is read-only; vel is updated
// in place. deviceOffset/deviceNumBodies select this device's slice.
// NOTE(review): both half-kicks use the acceleration evaluated at the OLD
// positions, and threads with index >= deviceNumBodies return before the
// block-wide cg::sync() calls inside computeBodyAccel — this is only safe
// when deviceNumBodies is a multiple of blockDim.x; confirm launch config.
template<typename T>
__global__ void
integrateBodies(typename vec4<T>::Type *__restrict__ newPos,
                typename vec4<T>::Type *__restrict__ oldPos,
                typename vec4<T>::Type *vel,
                unsigned int deviceOffset, unsigned int deviceNumBodies,
                float deltaTime, float damping, int numTiles)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= deviceNumBodies)
    {
        return;
    }
    typename vec4<T>::Type position = oldPos[deviceOffset + index];
    typename vec3<T>::Type accel = computeBodyAccel<T>(position,
                                                       oldPos,
                                                       numTiles, cta);
    // acceleration = force / mass;
    // new velocity = old velocity + acceleration * deltaTime
    // note we factor out the body's mass from the equation, here and in bodyBodyInteraction
    // (because they cancel out). Thus here force == acceleration
    typename vec4<T>::Type velocity = vel[deviceOffset + index];
    velocity.x += accel.x * (deltaTime / 2); // v = v0 + a * (dt/2)
    velocity.y += accel.y * (deltaTime / 2);
    velocity.z += accel.z * (deltaTime / 2);
    // new position = old position + velocity * deltaTime
    position.x += velocity.x * deltaTime; // x = x0 + v * t
    position.y += velocity.y * deltaTime;
    position.z += velocity.z * deltaTime;
    velocity.x += accel.x * (deltaTime / 2); // v = v0 + a * (dt/2)
    velocity.y += accel.y * (deltaTime / 2);
    velocity.z += accel.z * (deltaTime / 2);
    // Damping emulates energy loss; 1.0 means none.
    velocity.x *= damping;
    velocity.y *= damping;
    velocity.z *= damping;
    // store new position and velocity
    newPos[deviceOffset + index] = position;
    vel[deviceOffset + index] = velocity;
}
// Integrate the N-body system one step of size deltaTime across numDevices GPUs.
// Each device integrates its own slice (deviceData[dev]) against all numBodies
// positions; `currentRead` selects which of the two position buffers holds the
// current state. When bUsePBO is true the position buffers are OpenGL PBOs and
// must be mapped before (and unmapped after) the kernel launches.
template <typename T>
void integrateNbodySystem(DeviceData<T> *deviceData,
                          cudaGraphicsResource **pgres,
                          unsigned int currentRead,
                          float deltaTime,
                          float damping,
                          unsigned int numBodies,
                          unsigned int numDevices,
                          int blockSize,
                          bool bUsePBO)
{
    if (bUsePBO)
    {
        // Read buffer is mapped read-only; the buffer about to be overwritten
        // is mapped write-discard so its old contents need not be preserved.
        checkCudaErrors(cudaGraphicsResourceSetMapFlags(pgres[currentRead], cudaGraphicsMapFlagsReadOnly));
        checkCudaErrors(cudaGraphicsResourceSetMapFlags(pgres[1-currentRead], cudaGraphicsMapFlagsWriteDiscard));
        checkCudaErrors(cudaGraphicsMapResources(2, pgres, 0));
        size_t bytes;
        checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[currentRead]), &bytes, pgres[currentRead]));
        checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[1-currentRead]), &bytes, pgres[1-currentRead]));
    }
    for (unsigned int dev = 0; dev != numDevices; dev++)
    {
        if (numDevices > 1)
        {
            cudaSetDevice(dev);
        }
        int numBlocks = (deviceData[dev].numBodies + blockSize-1) / blockSize; // this parameter controls the number of thread blocks to be assigned
        int numTiles = (numBodies + blockSize - 1) / blockSize; // this parameter controls the granularity at the tile level (parameter 'p')
        int sharedMemSize = blockSize * 4 * sizeof(T); // 4 floats for pos
        integrateBodies<T><<< numBlocks, blockSize, sharedMemSize >>>
        ((typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead],
         (typename vec4<T>::Type *)deviceData[dev].dPos[currentRead],
         (typename vec4<T>::Type *)deviceData[dev].dVel,
         deviceData[dev].offset, deviceData[dev].numBodies,
         deltaTime, damping, numTiles);
        if (numDevices > 1)
        {
            checkCudaErrors(cudaEventRecord(deviceData[dev].event));
            // MJH: Hack on older driver versions to force kernel launches to flush!
            cudaStreamQuery(0);
        }
        // check if kernel invocation generated an error
        getLastCudaError("Kernel execution failed");
    }
    if (numDevices > 1)
    {
        // Wait for every device's kernel before buffers are swapped / unmapped.
        for (unsigned int dev = 0; dev < numDevices; dev++)
        {
            checkCudaErrors(cudaEventSynchronize(deviceData[dev].event));
        }
    }
    if (bUsePBO)
    {
        checkCudaErrors(cudaGraphicsUnmapResources(2, pgres, 0));
    }
}
// Explicit instantiations for the two supported precisions; without these,
// the template definitions in this translation unit would emit no code for
// callers linking against it.
template void integrateNbodySystem<float>(DeviceData<float> *deviceData,
                                          cudaGraphicsResource **pgres,
                                          unsigned int currentRead,
                                          float deltaTime,
                                          float damping,
                                          unsigned int numBodies,
                                          unsigned int numDevices,
                                          int blockSize,
                                          bool bUsePBO);
template void integrateNbodySystem<double>(DeviceData<double> *deviceData,
                                           cudaGraphicsResource **pgres,
                                           unsigned int currentRead,
                                           float deltaTime,
                                           float damping,
                                           unsigned int numBodies,
                                           unsigned int numDevices,
                                           int blockSize,
                                           bool bUsePBO);
|
377af7f06ebf386b8bb721cd8ffbfbdb0167f6f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/job/parallel_desc.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/core/embedding/hash_functions.cuh"
#include "oneflow/core/embedding/embedding_manager.h"
#include "oneflow/core/control/ctrl_client.h"
namespace oneflow {
namespace {
// One slot of the open-addressing hash tables below. `key` is claimed via
// atomic CAS (0 == empty); `value` packs ((reverse_index + 1) << 1) | (key & 0x1).
template<typename K>
struct TableEntry {
  K key;
  uint32_t value;
};
// Deduplicate `keys` and partition the uniques across `num_partition` ranks in
// a single pass, using a zero-initialized open-addressing hash table (linear
// probing over `table_capacity` slots).
// Outputs:
//   unique_counts[p]          - number of uniques assigned to partition p
//   partitioned_unique_keys   - uniques for partition p start at p * num_keys
//   partitioned_unique_values - matching values (only if need_process_values)
//   reverse_index[i]          - p * num_keys + local index of keys[i]'s unique
// NOTE(review): `is_kernel_start` is unused in this kernel — presumably kept
// for launch-signature compatibility; confirm.
template<typename K, typename V, typename IDX, typename HASH>
__global__ void HashTableUniqueAndPartitionPairs(
    const uint32_t table_capacity, const uint32_t num_keys, int32_t num_partition,
    IDX* unique_counts, TableEntry<K>* table, const K* keys, const V* values,
    K* partitioned_unique_keys, V* partitioned_unique_values, IDX* reverse_index,
    bool need_process_values, int32_t* is_kernel_start) {
  CUDA_1D_KERNEL_LOOP_T(uint32_t, i, num_keys) {
    IDX r_index_plus_one = 0;
    const K key = keys[i];
    size_t key_hash = HASH()(key);
    uint32_t partition_id = key_hash % num_partition;
    IDX* unique_count = unique_counts + partition_id;
    K* unique_keys = partitioned_unique_keys + partition_id * num_keys;
    uint32_t pos = key_hash % table_capacity;
    // The table stores key|1 so a stored key can never equal the empty marker
    // (0); the real low bit is kept in the low bit of `value`.
    const K key_hi = (key | 0x1);
    const K key_lo = (key & 0x1);
    uint32_t counter = 0;
    while (r_index_plus_one == 0) {
      bool prob_next = false;
      K* key_ptr = &table[pos].key;
      volatile uint32_t* table_value_ptr = &table[pos].value;
      // Try to claim the slot; CAS returns the previous key.
      const K old_key = cuda::atomic::CAS(key_ptr, 0, key_hi);
      if (old_key == 0) {
        // We own the slot: allocate a unique index and publish it via `value`.
        IDX unique_pos = cuda::atomic::Add(unique_count, 1);
        r_index_plus_one = unique_pos + 1;
        unique_keys[unique_pos] = key;
        if (need_process_values) {
          partitioned_unique_values[partition_id * num_keys + unique_pos] = values[i];
        }
        *table_value_ptr = ((r_index_plus_one << 1U) | key_lo);
      } else if (old_key == key_hi) {
        const uint32_t value = *table_value_ptr;
        if (value == 0) {
          // Owner has not published yet — retry the same slot.
        } else if ((value & 0x1) == key_lo) {
          // Same full key: reuse the owner's unique index.
          r_index_plus_one = (value >> 1U);
        } else {
          prob_next = true; // key_hi collision but different low bit
        }
      } else {
        prob_next = true; // slot held by a different key
      }
      if (prob_next) {
        pos += 1;
        counter += 1;
        if (pos >= table_capacity) { pos -= table_capacity; }
        if (counter >= table_capacity) { __trap(); } // table full: abort kernel
      }
    }
    reverse_index[i] = partition_id * num_keys + r_index_plus_one - 1;
  }
}
// Per-rank device pointers exchanged over CUDA IPC; N is the maximum world
// size supported (8 at the call sites). Index i addresses rank i's buffers.
template<typename K, typename U, typename IDX, int N>
struct Param {
  IDX* num_unique[N];        // per-rank partitioned unique counts
  K* unique_ids[N];          // per-rank partitioned unique keys
  U* unique_table_ids[N];    // per-rank partitioned table ids
  int32_t* is_kernel_start[N]; // per-rank barrier generation counters
  IDX* num_unique_matrix;    // [parallel_num x parallel_num] unique counts
  int32_t* counter;
};
// Aligned element for vectorized (pack_size * sizeof(T)-byte) loads/stores.
template<typename T, int pack_size>
struct alignas(sizeof(T) * pack_size) Pack {
  T elem[pack_size];
};
// Software barrier across ranks fused with zeroing of the hash-table
// workspace and `counter`. Block 0 bumps this rank's generation counter in
// every peer's is_kernel_start array, all blocks zero the workspace, then
// block 0 spins until every peer has published the same generation — so the
// workspace is guaranteed clean on all ranks before the next kernel runs.
template<typename K, typename V, typename IDX, int N, int pack_size>
__global__ void BarrierAndMemset(int32_t parallel_id, int32_t parallel_num,
                                 Param<K, V, IDX, N> param, Pack<char, pack_size>* workspace_ptr,
                                 size_t workspace_num_pack, IDX* counter, int num_counter) {
  int count;
  if (blockIdx.x == 0) {
    count = param.is_kernel_start[parallel_id][parallel_id];
    if (threadIdx.x < parallel_num) {
      // Publish arrival (generation count + 1) to every peer.
      volatile int32_t* start_f = param.is_kernel_start[parallel_id];
      start_f[threadIdx.x] = count + 1;
    }
  }
  // Zero the workspace with wide (pack_size-byte) stores.
  Pack<char, pack_size> pack_value;
  for (int i = 0; i < pack_size; ++i) { pack_value.elem[i] = static_cast<char>(0); }
  CUDA_1D_KERNEL_LOOP(i, workspace_num_pack) { workspace_ptr[i] = pack_value; }
  int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (global_thread_id < num_counter) { counter[global_thread_id] = 0; }
  if (blockIdx.x == 0) {
    if (threadIdx.x < parallel_num) {
      // Spin until every peer has reached the same generation.
      volatile int32_t* remote_start_f = param.is_kernel_start[threadIdx.x];
      while (remote_start_f[parallel_id] < count + 1) {}
    }
  }
}
// Second-stage dedup: merge, into one local unique set, the partitioned
// uniques that every rank produced for THIS rank (param.unique_ids[r] etc.,
// accessed via peer pointers). Uses the same CAS/linear-probing protocol as
// HashTableUniqueAndPartitionPairs. reverse_index maps each incoming id
// (offset by the preceding ranks' counts) to its local unique slot. As a side
// job, num_unique is mirrored into num_unique_matrix for the host.
template<typename K, typename V, typename IDX, typename HASH, int N>
__global__ void HashTableUniquePairs(const uint32_t table_capacity, const uint32_t num_ids,
                                     int32_t parallel_num, int32_t parallel_id, IDX* unique_count,
                                     TableEntry<K>* table, Param<K, V, IDX, N> param,
                                     K* unique_keys, V* unique_values, IDX* reverse_index,
                                     bool need_process_values) {
// Ranks are visited starting from (parallel_id + 1) to stagger peer traffic.
#pragma unroll 1
  for (int i = 0; i < parallel_num; ++i) {
    int rank_id = (parallel_id + i) % parallel_num;
    const IDX* num_uniques = param.num_unique[rank_id];
    CUDA_1D_KERNEL_LOOP_T(int, rank_index, num_uniques[parallel_id]) {
      // NOTE(review): redeclares (shadows) num_uniques from the outer scope,
      // and the bounds guard below was left commented out — presumably
      // intentional since the loop bound already enforces it; confirm.
      const IDX* num_uniques = param.num_unique[rank_id];
      // if (rank_index >= num_uniques[parallel_id]) { continue; }
      const K* keys = param.unique_ids[rank_id];
      const V* values = param.unique_table_ids[rank_id];
      // Offset of rank_id's slice in the concatenated reverse_index output.
      IDX index_offset = 0;
      for (int k = 0; k < rank_id; ++k) { index_offset += param.num_unique[k][parallel_id]; }
      IDX r_index_plus_one = 0;
      const K key = keys[rank_index];
      size_t key_hash = HASH()(key);
      uint32_t pos = key_hash % table_capacity;
      const K key_hi = (key | 0x1);
      const K key_lo = (key & 0x1);
      uint32_t counter = 0;
      while (r_index_plus_one == 0) {
        bool prob_next = false;
        K* key_ptr = &table[pos].key;
        volatile uint32_t* table_value_ptr = &table[pos].value;
        const K old_key = cuda::atomic::CAS(key_ptr, 0, key_hi);
        if (old_key == 0) {
          // Slot claimed: allocate the local unique index and publish it.
          IDX unique_pos = cuda::atomic::Add(unique_count, 1);
          r_index_plus_one = unique_pos + 1;
          unique_keys[unique_pos] = key;
          if (need_process_values) { unique_values[unique_pos] = values[rank_index]; }
          *table_value_ptr = ((r_index_plus_one << 1U) | key_lo);
        } else if (old_key == key_hi) {
          const uint32_t value = *table_value_ptr;
          if (value == 0) {
            // Owner has not published yet — retry the same slot.
          } else if ((value & 0x1) == key_lo) {
            r_index_plus_one = (value >> 1U);
          } else {
            prob_next = true;
          }
        } else {
          prob_next = true;
        }
        if (prob_next) {
          pos += 1;
          counter += 1;
          if (pos >= table_capacity) { pos -= table_capacity; }
          if (counter >= table_capacity) { __trap(); }
        }
      }
      reverse_index[rank_index + index_offset] = r_index_plus_one - 1;
      // Mirror row i of the unique-count matrix while we are here.
      if (rank_index < parallel_num) {
        param.num_unique_matrix[i * parallel_num + rank_index] = param.num_unique[i][rank_index];
      }
    }
  }
}
// Fills table_ids round-robin (i % num_tables) and, fused into the same
// launch to save a separate memset, zeroes the hash workspace and `counter`.
template<typename U, typename IDX, int pack_size>
__global__ void GenerateTableIdsAndMemsetUniqueWorkspace(int32_t elem_cnt, int32_t num_tables,
                                                         U* table_ids,
                                                         Pack<char, pack_size>* workspace_ptr,
                                                         size_t workspace_num_pack, IDX* counter,
                                                         int num_counter) {
  CUDA_1D_KERNEL_LOOP(i, elem_cnt) { table_ids[i] = i % num_tables; }
  // Zero the workspace with wide (pack_size-byte) stores.
  Pack<char, pack_size> pack_value;
  for (int i = 0; i < pack_size; ++i) { pack_value.elem[i] = static_cast<char>(0); }
  CUDA_1D_KERNEL_LOOP(i, workspace_num_pack) { workspace_ptr[i] = pack_value; }
  int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (global_thread_id < num_counter) { counter[global_thread_id] = 0; }
}
// Host-side wrapper: verifies the workspace can hold the `capacity`-slot hash
// table, then launches HashTableUniqueAndPartitionPairs on `cuda_stream`.
// The workspace must already be zeroed (empty-slot marker is 0).
template<typename K, typename V, typename IDX, typename HASH>
void UniqueAndPartition(hipStream_t cuda_stream, int64_t num_blocks, int64_t num_ids,
                        size_t capacity, int64_t num_partition, const K* ids, const V* table_ids,
                        IDX* num_partitioned_unique_ids_ptr, K* partitioned_unique_ids,
                        V* partitioned_unique_table_ids, IDX* inverse_unique_partition_indices,
                        void* workspace_ptr, size_t workspace_bytes, bool need_process_table_ids,
                        int32_t* is_kernel_start_ptr) {
  size_t table_capacity_bytes = capacity * sizeof(TableEntry<K>);
  CHECK_GE(workspace_bytes, table_capacity_bytes);
  hipLaunchKernelGGL(( HashTableUniqueAndPartitionPairs<K, V, IDX, HASH>), dim3(num_blocks), dim3(1024), 0, cuda_stream,
      capacity, num_ids, num_partition, num_partitioned_unique_ids_ptr,
      reinterpret_cast<TableEntry<K>*>(workspace_ptr), ids, table_ids, partitioned_unique_ids,
      partitioned_unique_table_ids, inverse_unique_partition_indices, need_process_table_ids,
      is_kernel_start_ptr);
}
enum class IdShuffleBufferType { kTableIds = 0, kWorkspace, kMaxType };
// Lays out the temporary buffer used by the id-shuffle kernel: an optional
// region for generated table ids followed by the hash-table workspace.
// Constructed with ptr == nullptr it acts as a pure size query
// (TotalBufferSize); with a real pointer, Ptr()/Size() address the regions.
template<typename K, typename U, typename IDX>
class IdShuffleTmpBufferManager final {
 public:
  OF_DISALLOW_COPY_AND_MOVE(IdShuffleTmpBufferManager);
  // need_table_ids: table ids must be generated into this buffer.
  // need_process_table_ids: kept for interface compatibility; not used in the
  // layout computation (the removed dead local was its only former use).
  IdShuffleTmpBufferManager(void* ptr, const int64_t num_ids, const int64_t parallel_num,
                            bool need_table_ids, bool need_process_table_ids)
      : offset_(0),
        offsets_(static_cast<size_t>(IdShuffleBufferType::kMaxType), -1),
        sizes_(static_cast<size_t>(IdShuffleBufferType::kMaxType)),
        ptr_(ptr) {
    // Table ids are only materialized here when they must be generated.
    const size_t table_ids_bytes = need_table_ids ? num_ids * sizeof(U) : 0;
    AllocBuffer(IdShuffleBufferType::kTableIds, table_ids_bytes);
    // The hash table must be able to hold every id from every rank's partition.
    const size_t hash_table_capacity = parallel_num * num_ids;
    AllocBuffer(IdShuffleBufferType::kWorkspace, hash_table_capacity * sizeof(TableEntry<K>));
  }

  // Pointer to the region reserved for `type`; requires a non-null base ptr.
  template<typename T = void>
  T* Ptr(IdShuffleBufferType type) {
    CHECK(ptr_ != nullptr);
    int64_t offset = offsets_.at(static_cast<size_t>(type));
    CHECK_NE(offset, -1);
    return reinterpret_cast<T*>(reinterpret_cast<char*>(ptr_) + offset);
  }
  // Size in bytes reserved for `type`.
  int64_t Size(IdShuffleBufferType type) { return sizes_.at(static_cast<size_t>(type)); }

  // Total bytes needed for all regions (each aligned via GetCudaAlignedSize).
  size_t TotalBufferSize() const { return offset_; }

 private:
  // Record the offset/size of one region and advance the aligned cursor.
  void AllocBuffer(IdShuffleBufferType type, size_t size) {
    const size_t type_id = static_cast<size_t>(type);
    CHECK_EQ(offsets_.at(type_id), -1);
    offsets_.at(type_id) = offset_;
    sizes_.at(type_id) = size;
    offset_ += GetCudaAlignedSize(size);
  }
  size_t offset_;                 // running end-of-layout cursor (== total size)
  std::vector<int64_t> offsets_;  // per-type byte offset, -1 if never allocated
  std::vector<int64_t> sizes_;    // per-type byte size
  void* ptr_;                     // base pointer (may be null for size queries)
};
// Persistent per-kernel state: pinned host staging buffers for the unique
// counts, plus one device buffer per rank (this rank allocates its own; the
// peers' buffers are attached later via CUDA IPC in GetPtrs). The device
// buffer is laid out as [num_partitioned | unique_ids | unique_table_ids |
// is_kernel_start], each region aligned via GetCudaAlignedSize.
template<typename K, typename U, typename IDX>
class DataShuffleKernelState final : public user_op::OpKernelState {
 public:
  explicit DataShuffleKernelState(user_op::KernelInitContext* ctx)
      : device_index_(-1),
        parallel_desc_(ctx->parallel_desc()),
        parallel_id_(ctx->parallel_ctx().parallel_id()) {
    OF_CUDA_CHECK(hipGetDevice(&device_index_));
    int64_t parallel_num = parallel_desc_.parallel_num();
    // Pinned host memory so device kernels can write results the host reads
    // after a stream sync.
    OF_CUDA_CHECK(
        hipHostMalloc(&host_num_unique_matrix_, parallel_num * parallel_num * sizeof(IDX)));
    OF_CUDA_CHECK(hipHostMalloc(&host_cur_rank_num_unique_, sizeof(IDX)));
    const std::string& embedding_name = ctx->Attr<std::string>("embedding_name");
    const int64_t parallel_id = parallel_id_;
    embedding_state_ = Singleton<embedding::EmbeddingManager>::Get()->GetEmbeddingState(
        embedding_name, parallel_id);
    const int64_t num_ids = ctx->TensorDesc4ArgNameAndIndex("ids", 0)->shape().elem_cnt();
    num_partitioned_unique_size_ = GetCudaAlignedSize(parallel_num * sizeof(IDX));
    partitioned_unique_ids_size_ = GetCudaAlignedSize(parallel_num * num_ids * sizeof(K));
    partitioned_unique_table_ids_size_ = GetCudaAlignedSize(parallel_num * num_ids * sizeof(U));
    is_kernel_start_size_ = GetCudaAlignedSize(parallel_num * sizeof(int32_t));
    size_t buffer_size = num_partitioned_unique_size_ + partitioned_unique_ids_size_
                         + partitioned_unique_table_ids_size_ + is_kernel_start_size_;
    buffer_ptrs_.resize(parallel_num);
    // Fix: these two calls were previously unchecked, unlike every other CUDA
    // call here; an allocation failure would have surfaced later as corruption.
    OF_CUDA_CHECK(hipMalloc(&buffer_ptrs_.at(parallel_id), buffer_size));
    OF_CUDA_CHECK(hipMemset(buffer_ptrs_.at(parallel_id), 0, buffer_size));
  }
  ~DataShuffleKernelState() {
    CudaCurrentDeviceGuard guard(device_index_);
    OF_CUDA_CHECK(hipHostFree(host_cur_rank_num_unique_));
    OF_CUDA_CHECK(hipHostFree(host_num_unique_matrix_));
    // Only this rank's own allocation is freed; peer entries are IPC mappings.
    OF_CUDA_CHECK(hipFree(buffer_ptrs_.at(parallel_id_)));
  }

  std::vector<void*>* BufferPtrs() { return &buffer_ptrs_; }
  IDX* HostNumUniqueMatrix() { return host_num_unique_matrix_; }
  IDX* HostCurRankNumUnique() { return host_cur_rank_num_unique_; }
  embedding::EmbeddingState* EmbeddingState() { return embedding_state_; }

  // Accessors into rank `parallel_id`'s buffer, one per layout region.
  IDX* NumPartitionedUnique(int64_t parallel_id) {
    return reinterpret_cast<IDX*>(buffer_ptrs_.at(parallel_id));
  }
  K* PartitionedUniqueIds(int64_t parallel_id) {
    return reinterpret_cast<K*>(reinterpret_cast<char*>(buffer_ptrs_.at(parallel_id))
                                + num_partitioned_unique_size_);
  }
  U* PartitionedUniqueTableIds(int64_t parallel_id) {
    return reinterpret_cast<U*>(reinterpret_cast<char*>(buffer_ptrs_.at(parallel_id))
                                + num_partitioned_unique_size_ + partitioned_unique_ids_size_);
  }
  int32_t* IsKernelStart(int64_t parallel_id) {
    return reinterpret_cast<int32_t*>(reinterpret_cast<char*>(buffer_ptrs_.at(parallel_id))
                                      + num_partitioned_unique_size_ + partitioned_unique_ids_size_
                                      + partitioned_unique_table_ids_size_);
  }

 private:
  int device_index_;
  ParallelDesc parallel_desc_;
  int64_t parallel_id_;
  IDX* host_num_unique_matrix_;     // pinned, parallel_num x parallel_num
  IDX* host_cur_rank_num_unique_;   // pinned, single value
  std::vector<void*> buffer_ptrs_;  // index i -> rank i's device buffer
  size_t num_partitioned_unique_size_;
  size_t partitioned_unique_ids_size_;
  size_t partitioned_unique_table_ids_size_;
  size_t is_kernel_start_size_;
  embedding::EmbeddingState* embedding_state_;
};
// Exchange device buffer pointers between ranks: publish this rank's buffer
// as a CUDA IPC handle through the control-plane KV store, then pull and open
// every peer's handle. Afterwards buffer_ptrs->at(i) is a peer-accessible
// pointer to rank i's buffer.
// NOTE(review): handles opened via hipIpcOpenMemHandle are never closed —
// presumably acceptable because this runs once per kernel-state lifetime;
// confirm against teardown requirements.
void GetPtrs(user_op::KernelComputeContext* ctx, std::vector<void*>* buffer_ptrs) {
  const int64_t parallel_id = ctx->parallel_ctx().parallel_id();
  const int64_t parallel_num = ctx->parallel_ctx().parallel_num();
  std::string name = ctx->op_name();
  hipIpcMemHandle_t handle;
  OF_CUDA_CHECK(hipIpcGetMemHandle(&handle, buffer_ptrs->at(parallel_id)));
  // Publish our handle keyed by op name + rank id.
  Singleton<CtrlClient>::Get()->PushKV(
      name + std::to_string(parallel_id),
      std::string(reinterpret_cast<const char*>(&handle), sizeof(hipIpcMemHandle_t)));
  for (int64_t i = 0; i < parallel_num; ++i) {
    std::string key = name + std::to_string(i);
    if (parallel_id != i) {
      hipIpcMemHandle_t handle;  // shadows the outer handle intentionally
      Singleton<CtrlClient>::Get()->PullKV(key, [&handle](const std::string& val) {
        memcpy(&handle, val.data(), sizeof(hipIpcMemHandle_t));
      });
      OF_CUDA_CHECK(
          hipIpcOpenMemHandle(&buffer_ptrs->at(i), handle, hipIpcMemLazyEnablePeerAccess));
    }
  }
}
// Final fused kernel: (1) bump this rank's barrier generation on every peer,
// (2) rebase inverse_ptr entries from per-partition slots (p*num_ids + local)
// to compacted offsets (sum of preceding partitions' counts + local),
// (3) copy num_unique_matrix and cur_rank_num_unique into pinned host memory,
// (4) spin until all peers reach the same generation so the host can safely
// read the pinned values after a stream sync.
template<typename K, typename V, typename IDX, int N>
__global__ void BarrierAndComputeOut(int32_t parallel_id, int32_t parallel_num, int32_t num_ids,
                                     Param<K, V, IDX, N> param, IDX* num_partitioned_unique,
                                     IDX* inverse_ptr, IDX* num_unique_matrix,
                                     IDX* host_num_unique_matrix, IDX* cur_rank_num_unique,
                                     IDX* host_cur_rank_num_unique) {
  int count;
  if (blockIdx.x == 0) {
    count = param.is_kernel_start[parallel_id][parallel_id];
    if (threadIdx.x < parallel_num) {
      // Publish arrival (generation count + 1) to every peer.
      volatile int32_t* start_f = param.is_kernel_start[parallel_id];
      start_f[threadIdx.x] = count + 1;
    }
  }
  if (parallel_num > 1) {
    CUDA_1D_KERNEL_LOOP(i, num_ids) {
      int inverse_indice = inverse_ptr[i];
      int partition_id = inverse_indice / num_ids;
      int partition_indice = inverse_indice - partition_id * num_ids;
      // Compact: sum the unique counts of all preceding partitions.
      int new_offset = 0;
      for (int k = 0; k < partition_id; ++k) { new_offset += num_partitioned_unique[k]; }
      inverse_ptr[i] = new_offset + partition_indice;
    }
  }
  int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (global_thread_id < parallel_num * parallel_num) {
    host_num_unique_matrix[global_thread_id] = num_unique_matrix[global_thread_id];
  }
  if (global_thread_id == 0) {
    host_cur_rank_num_unique[global_thread_id] = cur_rank_num_unique[global_thread_id];
  }
  if (blockIdx.x == 0) {
    if (threadIdx.x < parallel_num) {
      // Spin until every peer has reached the same generation.
      volatile int32_t* remote_start_f = param.is_kernel_start[threadIdx.x];
      while (remote_start_f[parallel_id] < count + 1) {}
    }
  }
}
} // namespace
// P2P variant of the id_shuffle op: deduplicates embedding ids, partitions
// them across ranks via peer-to-peer (IPC-mapped) buffers and device-side
// spin barriers instead of NCCL collectives. K = id type, U = table-id type,
// IDX = count/index type. All ranks must launch the same sequence of kernels
// each iteration, since the fused barriers synchronize by generation count.
template<typename K, typename U, typename IDX>
class IdShuffleP2PKernel final : public user_op::OpKernel {
 public:
  IdShuffleP2PKernel() : current_iter_(0){};
  ~IdShuffleP2PKernel() override = default;

  std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
      user_op::KernelInitContext* ctx) const override {
    return std::make_shared<DataShuffleKernelState<K, U, IDX>>(ctx);
  }

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state,
               const user_op::OpKernelCache*) const override {
    auto* kernel_state = dynamic_cast<DataShuffleKernelState<K, U, IDX>*>(state);
    CHECK(kernel_state != nullptr);
    const user_op::Tensor* ids = ctx->Tensor4ArgNameAndIndex("ids", 0);
    user_op::Tensor* num_unique_matrix = ctx->Tensor4ArgNameAndIndex("num_unique_matrix", 0);
    user_op::Tensor* inverse_unique_partition_indices =
        ctx->Tensor4ArgNameAndIndex("inverse_unique_partition_indices", 0);
    user_op::Tensor* cur_rank_num_unique = ctx->Tensor4ArgNameAndIndex("cur_rank_num_unique", 0);
    user_op::Tensor* cur_rank_unique_ids = ctx->Tensor4ArgNameAndIndex("cur_rank_unique_ids", 0);
    user_op::Tensor* cur_rank_unique_table_ids =
        ctx->Tensor4ArgNameAndIndex("cur_rank_unique_table_ids", 0);
    user_op::Tensor* cur_rank_inverse_indices =
        ctx->Tensor4ArgNameAndIndex("cur_rank_inverse_indices", 0);
    user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
    const int32_t num_tables = ctx->Attr<int32_t>("num_tables");
    const bool has_table_ids = ctx->has_input("table_ids", 0);
    // Table ids are generated round-robin when absent but more than one table exists.
    const bool need_gen_table_ids = (!has_table_ids && num_tables > 1);
    const bool need_process_table_ids = (has_table_ids || num_tables > 1);
    const int64_t num_ids = ids->shape_view().elem_cnt();
    const int64_t parallel_num = ctx->parallel_ctx().parallel_num();
    const int64_t parallel_id = ctx->parallel_ctx().parallel_id();
    hipStream_t cuda_stream = ctx->stream()->As<ep::CudaStream>()->cuda_stream();
    IdShuffleTmpBufferManager<K, U, IDX> buffer_manager(
        tmp_buffer->mut_dptr(), num_ids, parallel_num, need_gen_table_ids, need_process_table_ids);
    CHECK_GE(tmp_buffer->shape_view().elem_cnt(), buffer_manager.TotalBufferSize());
    // Peer buffer pointers are exchanged once, on the first iteration.
    if (current_iter_ == 0) { GetPtrs(ctx, kernel_state->BufferPtrs()); }
    const int num_blocks =
        2 * ctx->stream()->As<ep::CudaStream>()->device_properties().multiProcessorCount;
    IDX* num_partitioned_unique = kernel_state->NumPartitionedUnique(parallel_id);
    K* partitioned_unique_ids = kernel_state->PartitionedUniqueIds(parallel_id);
    U* partitioned_unique_table_ids = kernel_state->PartitionedUniqueTableIds(parallel_id);
    IDX* num_unique_matrix_ptr = reinterpret_cast<IDX*>(num_unique_matrix->mut_dptr());
    size_t hash_table_capacity = parallel_num * num_ids;
    void* workspace_ptr = buffer_manager.Ptr(IdShuffleBufferType::kWorkspace);
    size_t workspace_size = buffer_manager.Size(IdShuffleBufferType::kWorkspace);
    const U* table_ids_ptr;
    bool skip_memset = false;
    if (has_table_ids) {
      const user_op::Tensor* table_ids = ctx->Tensor4ArgNameAndIndex("table_ids", 0);
      table_ids_ptr = reinterpret_cast<const U*>(table_ids->dptr());
    } else if (need_gen_table_ids) {
      // Fused kernel generates table ids AND zeroes the workspace/counters,
      // so the explicit memsets below can be skipped.
      CHECK_EQ(workspace_size % 16, 0);
      CHECK_EQ(reinterpret_cast<std::uintptr_t>(workspace_ptr) % 16, 0);
      hipLaunchKernelGGL(( GenerateTableIdsAndMemsetUniqueWorkspace<U, IDX, 16>), dim3(num_blocks), dim3(1024), 0, cuda_stream,
          num_ids, num_tables, buffer_manager.template Ptr<U>(IdShuffleBufferType::kTableIds),
          reinterpret_cast<Pack<char, 16>*>(workspace_ptr), workspace_size / 16,
          num_partitioned_unique, parallel_num);
      table_ids_ptr = buffer_manager.template Ptr<U>(IdShuffleBufferType::kTableIds);
      skip_memset = true;
    } else {
      table_ids_ptr = nullptr;
    }
    if (!skip_memset) {
      // Hash table workspace and per-partition counters must start zeroed.
      OF_CUDA_CHECK(hipMemsetAsync(workspace_ptr, 0, workspace_size, cuda_stream));
      OF_CUDA_CHECK(
          hipMemsetAsync(num_partitioned_unique, 0, parallel_num * sizeof(IDX), cuda_stream));
    }
    // Stage 1: local dedup + partitioning into the IPC-shared buffer.
    UniqueAndPartition<K, U, IDX, embedding::ShardingHash>(
        cuda_stream, num_blocks, num_ids, hash_table_capacity, parallel_num,
        reinterpret_cast<const K*>(ids->dptr()), table_ids_ptr, num_partitioned_unique,
        partitioned_unique_ids, partitioned_unique_table_ids,
        reinterpret_cast<IDX*>(inverse_unique_partition_indices->mut_dptr()), workspace_ptr,
        workspace_size, need_process_table_ids, kernel_state->IsKernelStart(parallel_id));
    IDX* cur_rank_num_unique_ids_ptr = reinterpret_cast<IDX*>(cur_rank_num_unique->mut_dptr());
    // Collect every rank's buffer pointers (this rank's slice of each).
    Param<K, U, IDX, 8> param;
    CHECK_LE(parallel_num, 8);
    for (int i = 0; i < parallel_num; ++i) {
      param.num_unique[i] = kernel_state->NumPartitionedUnique(i);
      param.unique_ids[i] = kernel_state->PartitionedUniqueIds(i) + parallel_id * num_ids;
      param.unique_table_ids[i] =
          kernel_state->PartitionedUniqueTableIds(i) + parallel_id * num_ids;
      param.is_kernel_start[i] = kernel_state->IsKernelStart(i);
    }
    param.num_unique_matrix = num_unique_matrix_ptr;
    CHECK_EQ(workspace_size % 16, 0);
    CHECK_EQ(reinterpret_cast<std::uintptr_t>(workspace_ptr) % 16, 0);
    int workspace_num_pack = workspace_size / 16;
    // Stage 2: cross-rank barrier + reset workspace for the second dedup pass.
    hipLaunchKernelGGL(( BarrierAndMemset), dim3(num_blocks), dim3(1024), 0, cuda_stream,
        parallel_id, parallel_num, param, reinterpret_cast<Pack<char, 16>*>(workspace_ptr),
        workspace_num_pack, cur_rank_num_unique_ids_ptr, 1);
    // Stage 3: merge all ranks' partitions destined for this rank.
    hipLaunchKernelGGL(( HashTableUniquePairs<K, U, IDX, embedding::LocalUniqueHash>)
        , dim3(num_blocks), dim3(1024), 0, cuda_stream,
        hash_table_capacity, num_ids, parallel_num, parallel_id, cur_rank_num_unique_ids_ptr,
        reinterpret_cast<TableEntry<K>*>(workspace_ptr), param,
        reinterpret_cast<K*>(cur_rank_unique_ids->mut_dptr()),
        reinterpret_cast<U*>(cur_rank_unique_table_ids->mut_dptr()),
        reinterpret_cast<IDX*>(cur_rank_inverse_indices->mut_dptr()), need_process_table_ids);
    IDX* host_num_unique_matrix = kernel_state->HostNumUniqueMatrix();
    IDX* host_cur_rank_num_unique = kernel_state->HostCurRankNumUnique();
    // Stage 4: barrier + compact indices + stage counts into pinned memory.
    hipLaunchKernelGGL(( BarrierAndComputeOut), dim3(num_blocks), dim3(1024), 0, cuda_stream,
        parallel_id, parallel_num, num_ids, param, num_partitioned_unique,
        reinterpret_cast<IDX*>(inverse_unique_partition_indices->mut_dptr()), num_unique_matrix_ptr,
        host_num_unique_matrix, cur_rank_num_unique_ids_ptr, host_cur_rank_num_unique);
    if (!need_process_table_ids) {
      OF_CUDA_CHECK(hipMemsetAsync(cur_rank_unique_table_ids->mut_dptr(), 0,
                                   cur_rank_unique_table_ids->shape_view().elem_cnt() * sizeof(U),
                                   cuda_stream));
    }
    embedding::EmbeddingState* embedding_state = kernel_state->EmbeddingState();
    std::vector<uint32_t> num_unique_matrix_vec(parallel_num * parallel_num);
    // Host must wait for the stream before reading the pinned staging buffers.
    CHECK_JUST(ctx->stream()->Sync());
    std::memcpy(num_unique_matrix_vec.data(), host_num_unique_matrix,
                parallel_num * parallel_num * sizeof(IDX));
    CHECK_EQ(sizeof(IDX), sizeof(uint32_t)) << "assume sizeof(IDX) equals to sizeof(uint32_t)";
    embedding_state->SetIdNumUniqueMatrix(num_unique_matrix_vec, current_iter_);
    uint32_t final_num_unique = *host_cur_rank_num_unique;
    embedding_state->SetIdFinalNumUnique(final_num_unique, current_iter_);
    current_iter_++;
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
  // Iteration counter: gates the one-time IPC pointer exchange and tags the
  // per-iteration results pushed into the embedding state.
  mutable int64_t current_iter_;
};
// Supported id (key) dtypes for kernel registration.
#define ID_DATA_TYPE_SEQ                            \
  OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32) \
  OF_PP_MAKE_TUPLE_SEQ(uint64_t, DataType::kUInt64) \
  OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)   \
  OF_PP_MAKE_TUPLE_SEQ(int64_t, DataType::kInt64)

// Supported table-id dtypes.
#define TABLE_ID_DATA_TYPE_SEQ                      \
  OF_PP_MAKE_TUPLE_SEQ(uint8_t, DataType::kUInt8)   \
  OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32) \
  OF_PP_MAKE_TUPLE_SEQ(uint64_t, DataType::kUInt64) \
  OF_PP_MAKE_TUPLE_SEQ(int8_t, DataType::kInt8)     \
  OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)   \
  OF_PP_MAKE_TUPLE_SEQ(int64_t, DataType::kInt64)

// Supported count/index dtypes (must be 32-bit; Compute asserts
// sizeof(IDX) == sizeof(uint32_t)).
#define IDX_DATA_TYPE_SEQ                           \
  OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32) \
  OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)

// Registers IdShuffleP2PKernel for one (id, table-id, idx) dtype combination.
// Matching additionally requires the ONEFLOW_ONE_EMBEDDING_ID_SHUFFLE_USE_P2P
// env flag, so the P2P kernel never shadows the default id_shuffle kernel
// unless explicitly enabled.
#define REGISTER_CUDA_ID_SHUFFLE_P2P_KERNEL(k_dtype_pair, table_id_dtype_pair, idx_dtype_pair)     \
  REGISTER_USER_KERNEL("id_shuffle")                                                               \
      .SetCreateFn<IdShuffleP2PKernel<OF_PP_PAIR_FIRST(k_dtype_pair),                              \
                                      OF_PP_PAIR_FIRST(table_id_dtype_pair),                       \
                                      OF_PP_PAIR_FIRST(idx_dtype_pair)>>()                         \
      .SetIsMatchedHob(                                                                            \
          (user_op::HobDeviceType() == DeviceType::kCUDA)                                          \
          && (user_op::HobDataType("ids", 0) == OF_PP_PAIR_SECOND(k_dtype_pair))                   \
          && (user_op::HobDataType("cur_rank_unique_table_ids", 0)                                 \
              == OF_PP_PAIR_SECOND(table_id_dtype_pair))                                           \
          && (user_op::HobDataType("num_unique_matrix", 0) == OF_PP_PAIR_SECOND(idx_dtype_pair))   \
          && ParseBooleanFromEnv("ONEFLOW_ONE_EMBEDDING_ID_SHUFFLE_USE_P2P", false))               \
      .SetInferTmpSizeFn([](user_op::InferContext* ctx) {                                          \
        const user_op::TensorDesc& ids = ctx->InputTensorDesc("ids", 0);                           \
        const bool has_table_ids = ctx->has_input("table_ids", 0);                                 \
        const int32_t num_tables = ctx->Attr<int32_t>("num_tables");                               \
        const bool need_gen_table_ids = (!has_table_ids && num_tables > 1);                        \
        const bool need_process_table_ids = (has_table_ids || num_tables > 1);                     \
        IdShuffleTmpBufferManager<OF_PP_PAIR_FIRST(k_dtype_pair),                                  \
                                  OF_PP_PAIR_FIRST(table_id_dtype_pair),                           \
                                  OF_PP_PAIR_FIRST(idx_dtype_pair)>                                \
            buffer_manager(nullptr, ids.shape().elem_cnt(), ctx->parallel_desc().parallel_num(),   \
                           need_gen_table_ids, need_process_table_ids);                            \
        return buffer_manager.TotalBufferSize();                                                   \
      });

// Instantiate the registration for every dtype combination.
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_CUDA_ID_SHUFFLE_P2P_KERNEL, ID_DATA_TYPE_SEQ,
                                 TABLE_ID_DATA_TYPE_SEQ, IDX_DATA_TYPE_SEQ)
} // namespace oneflow
| 377af7f06ebf386b8bb721cd8ffbfbdb0167f6f9.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/job/parallel_desc.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/core/embedding/hash_functions.cuh"
#include "oneflow/core/embedding/embedding_manager.h"
#include "oneflow/core/control/ctrl_client.h"
namespace oneflow {
namespace {
// One slot of the open-addressing GPU hash table used for dedup.
// `key` is claimed via atomicCAS where 0 means "empty", so keys are stored
// with the low bit forced to 1 (key | 0x1); the key's true low bit is kept
// in `value`, which encodes ((unique_index + 1) << 1) | original_low_bit.
// A `value` of 0 means the slot is claimed but not yet published.
template<typename K>
struct TableEntry {
  K key;
  uint32_t value;
};
// Stage-1 dedup kernel: for each input key, picks a destination partition by
// hash, assigns the key a partition-local unique index via a lock-free
// open-addressing hash table (linear probing, atomicCAS claims), and records
// - partitioned_unique_keys[partition_id * num_keys + idx]: first occurrence
// - partitioned_unique_values[...]: matching value (only if need_process_values)
// - unique_counts[partition_id]: running count of uniques per partition
// - reverse_index[i]: encoded as partition_id * num_keys + partition-local idx
// The table must be zero-initialized and have `table_capacity` slots; a full
// table traps. See TableEntry for the key_hi/key_lo encoding.
// NOTE(review): `is_kernel_start` is accepted but unused in this kernel.
template<typename K, typename V, typename IDX, typename HASH>
__global__ void HashTableUniqueAndPartitionPairs(
    const uint32_t table_capacity, const uint32_t num_keys, int32_t num_partition,
    IDX* unique_counts, TableEntry<K>* table, const K* keys, const V* values,
    K* partitioned_unique_keys, V* partitioned_unique_values, IDX* reverse_index,
    bool need_process_values, int32_t* is_kernel_start) {
  CUDA_1D_KERNEL_LOOP_T(uint32_t, i, num_keys) {
    IDX r_index_plus_one = 0;  // 0 = not yet resolved; final index is this - 1
    const K key = keys[i];
    size_t key_hash = HASH()(key);
    uint32_t partition_id = key_hash % num_partition;
    IDX* unique_count = unique_counts + partition_id;
    K* unique_keys = partitioned_unique_keys + partition_id * num_keys;
    uint32_t pos = key_hash % table_capacity;
    // Store key with low bit set so it can never equal the "empty" marker 0;
    // keep the real low bit separately for disambiguation via `value`.
    const K key_hi = (key | 0x1);
    const K key_lo = (key & 0x1);
    uint32_t counter = 0;  // probes done; >= capacity means the table is full
    while (r_index_plus_one == 0) {
      bool prob_next = false;
      K* key_ptr = &table[pos].key;
      volatile uint32_t* table_value_ptr = &table[pos].value;
      const K old_key = cuda::atomic::CAS(key_ptr, 0, key_hi);
      if (old_key == 0) {
        // We claimed the slot: allocate a unique index and publish the entry.
        IDX unique_pos = cuda::atomic::Add(unique_count, 1);
        r_index_plus_one = unique_pos + 1;
        unique_keys[unique_pos] = key;
        if (need_process_values) {
          partitioned_unique_values[partition_id * num_keys + unique_pos] = values[i];
        }
        // Publishing the value last makes the entry visible to other threads.
        *table_value_ptr = ((r_index_plus_one << 1U) | key_lo);
      } else if (old_key == key_hi) {
        const uint32_t value = *table_value_ptr;
        if (value == 0) {
          // Slot claimed by another thread but not yet published: spin (retry).
        } else if ((value & 0x1) == key_lo) {
          // Same key (high bits and low bit both match): reuse its index.
          r_index_plus_one = (value >> 1U);
        } else {
          // key_hi collision with different low bit: a different key.
          prob_next = true;
        }
      } else {
        prob_next = true;
      }
      if (prob_next) {
        // Linear probing with wraparound; trap if every slot was visited.
        pos += 1;
        counter += 1;
        if (pos >= table_capacity) { pos -= table_capacity; }
        if (counter >= table_capacity) { __trap(); }
      }
    }
    reverse_index[i] = partition_id * num_keys + r_index_plus_one - 1;
  }
}
// Bundle of raw device pointers into every rank's IPC-shared buffer, passed
// by value to the fused barrier/dedup kernels. N is the compile-time maximum
// world size (8 in this file); only the first parallel_num entries are set.
template<typename K, typename U, typename IDX, int N>
struct Param {
  IDX* num_unique[N];           // per source rank: uniques counted per destination
  K* unique_ids[N];             // per source rank: unique keys destined for this rank
  U* unique_table_ids[N];       // per source rank: matching table ids
  int32_t* is_kernel_start[N];  // per rank: barrier epoch flags (one slot per peer)
  IDX* num_unique_matrix;       // this rank's parallel_num x parallel_num count matrix
  int32_t* counter;             // NOTE(review): never read in the visible kernels
};
// POD vector of pack_size elements, aligned so the compiler can emit a single
// wide load/store (e.g. Pack<char, 16> gives 16-byte vectorized memset writes).
template<typename T, int pack_size>
struct alignas(sizeof(T) * pack_size) Pack {
  T elem[pack_size];
};
// Fused cross-rank barrier + workspace clear.
// Entry (block 0): read this rank's own epoch, then advance one flag slot per
// peer in this rank's flag array ("rank parallel_id reached epoch count+1").
// Body (all blocks): zero the hash-table workspace with pack_size-byte vector
// stores and zero the first num_counter entries of `counter`.
// Exit (block 0): spin on each peer's flag slot for this rank until every
// peer has also advanced — i.e. all ranks finished their entry phase.
// NOTE(review): the spin relies on `volatile` loads observing stores to
// cudaIpc-mapped peer memory with no explicit __threadfence_system here —
// presumably safe in this protocol, but confirm against the memory model.
template<typename K, typename V, typename IDX, int N, int pack_size>
__global__ void BarrierAndMemset(int32_t parallel_id, int32_t parallel_num,
                                 Param<K, V, IDX, N> param, Pack<char, pack_size>* workspace_ptr,
                                 size_t workspace_num_pack, IDX* counter, int num_counter) {
  int count;
  if (blockIdx.x == 0) {
    count = param.is_kernel_start[parallel_id][parallel_id];
    if (threadIdx.x < parallel_num) {
      volatile int32_t* start_f = param.is_kernel_start[parallel_id];
      start_f[threadIdx.x] = count + 1;
    }
  }
  Pack<char, pack_size> pack_value;
  for (int i = 0; i < pack_size; ++i) { pack_value.elem[i] = static_cast<char>(0); }
  CUDA_1D_KERNEL_LOOP(i, workspace_num_pack) { workspace_ptr[i] = pack_value; }
  int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (global_thread_id < num_counter) { counter[global_thread_id] = 0; }
  if (blockIdx.x == 0) {
    if (threadIdx.x < parallel_num) {
      // Busy-wait until rank threadIdx.x has published the new epoch for us.
      volatile int32_t* remote_start_f = param.is_kernel_start[threadIdx.x];
      while (remote_start_f[parallel_id] < count + 1) {}
    }
  }
}
// Stage-2 dedup kernel: walks every source rank's partition destined for this
// rank (rank order rotated by parallel_id to spread contention) and inserts
// the keys into a shared open-addressing table to assign rank-local unique
// indices. Outputs:
//  - unique_keys / unique_values: compacted uniques for this rank
//    (values only written when need_process_values)
//  - reverse_index[global_slot]: rank-local unique index for each incoming
//    (source rank, slot) pair, where global_slot offsets each source rank by
//    the counts of the ranks before it
//  - param.num_unique_matrix: copy of the per-(src, dst) unique counts
// Probing and key/value encoding are identical to
// HashTableUniqueAndPartitionPairs; `unique_count` and `table` must be zeroed
// before launch (done by BarrierAndMemset).
// Fix vs. original: removed a redundant redeclaration of `num_uniques` inside
// the loop that shadowed the identical outer declaration, plus an adjacent
// dead commented-out line. No behavior change.
template<typename K, typename V, typename IDX, typename HASH, int N>
__global__ void HashTableUniquePairs(const uint32_t table_capacity, const uint32_t num_ids,
                                     int32_t parallel_num, int32_t parallel_id, IDX* unique_count,
                                     TableEntry<K>* table, Param<K, V, IDX, N> param,
                                     K* unique_keys, V* unique_values, IDX* reverse_index,
                                     bool need_process_values) {
#pragma unroll 1
  for (int i = 0; i < parallel_num; ++i) {
    // Rotate the visiting order so ranks do not all hammer rank 0 first.
    int rank_id = (parallel_id + i) % parallel_num;
    const IDX* num_uniques = param.num_unique[rank_id];
    CUDA_1D_KERNEL_LOOP_T(int, rank_index, num_uniques[parallel_id]) {
      const K* keys = param.unique_ids[rank_id];
      const V* values = param.unique_table_ids[rank_id];
      // Offset of this source rank's slots in the concatenated reverse index.
      IDX index_offset = 0;
      for (int k = 0; k < rank_id; ++k) { index_offset += param.num_unique[k][parallel_id]; }
      IDX r_index_plus_one = 0;
      const K key = keys[rank_index];
      size_t key_hash = HASH()(key);
      uint32_t pos = key_hash % table_capacity;
      const K key_hi = (key | 0x1);
      const K key_lo = (key & 0x1);
      uint32_t counter = 0;
      while (r_index_plus_one == 0) {
        bool prob_next = false;
        K* key_ptr = &table[pos].key;
        volatile uint32_t* table_value_ptr = &table[pos].value;
        const K old_key = cuda::atomic::CAS(key_ptr, 0, key_hi);
        if (old_key == 0) {
          // Claimed an empty slot: allocate a new rank-local unique index.
          IDX unique_pos = cuda::atomic::Add(unique_count, 1);
          r_index_plus_one = unique_pos + 1;
          unique_keys[unique_pos] = key;
          if (need_process_values) { unique_values[unique_pos] = values[rank_index]; }
          *table_value_ptr = ((r_index_plus_one << 1U) | key_lo);
        } else if (old_key == key_hi) {
          const uint32_t value = *table_value_ptr;
          if (value == 0) {
            // Entry claimed but not yet published by another thread: retry.
          } else if ((value & 0x1) == key_lo) {
            r_index_plus_one = (value >> 1U);
          } else {
            prob_next = true;
          }
        } else {
          prob_next = true;
        }
        if (prob_next) {
          pos += 1;
          counter += 1;
          if (pos >= table_capacity) { pos -= table_capacity; }
          if (counter >= table_capacity) { __trap(); }
        }
      }
      reverse_index[rank_index + index_offset] = r_index_plus_one - 1;
      if (rank_index < parallel_num) {
        // NOTE(review): this copy only executes for rank_index values below
        // the enclosing loop bound num_uniques[parallel_id]; if a source rank
        // sends fewer than parallel_num uniques here, part of row i is never
        // written by this kernel — confirm the matrix is initialized upstream
        // or that BarrierAndComputeOut compensates.
        param.num_unique_matrix[i * parallel_num + rank_index] = param.num_unique[i][rank_index];
      }
    }
  }
}
// Fused setup kernel used when table ids must be synthesized on the fly:
// assigns table_ids[i] = i % num_tables, clears the hash-table workspace with
// pack_size-byte vector stores, and zeroes the first num_counter counters.
template<typename U, typename IDX, int pack_size>
__global__ void GenerateTableIdsAndMemsetUniqueWorkspace(int32_t elem_cnt, int32_t num_tables,
                                                         U* table_ids,
                                                         Pack<char, pack_size>* workspace_ptr,
                                                         size_t workspace_num_pack, IDX* counter,
                                                         int num_counter) {
  // Round-robin table assignment: id i belongs to table (i mod num_tables).
  CUDA_1D_KERNEL_LOOP(i, elem_cnt) { table_ids[i] = i % num_tables; }
  // Build an all-zero pack once, then blast it over the workspace.
  Pack<char, pack_size> zero_pack;
#pragma unroll
  for (int k = 0; k < pack_size; ++k) { zero_pack.elem[k] = static_cast<char>(0); }
  CUDA_1D_KERNEL_LOOP(i, workspace_num_pack) { workspace_ptr[i] = zero_pack; }
  // The first num_counter threads of the grid reset the unique counters.
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < num_counter) { counter[tid] = 0; }
}
// Host-side launcher for HashTableUniqueAndPartitionPairs.
// `workspace_ptr` doubles as the open-addressing hash table, so it must hold
// at least `capacity` TableEntry<K> slots (checked below) and be zeroed
// before the launch. All pointers are device pointers on `cuda_stream`.
template<typename K, typename V, typename IDX, typename HASH>
void UniqueAndPartition(cudaStream_t cuda_stream, int64_t num_blocks, int64_t num_ids,
                        size_t capacity, int64_t num_partition, const K* ids, const V* table_ids,
                        IDX* num_partitioned_unique_ids_ptr, K* partitioned_unique_ids,
                        V* partitioned_unique_table_ids, IDX* inverse_unique_partition_indices,
                        void* workspace_ptr, size_t workspace_bytes, bool need_process_table_ids,
                        int32_t* is_kernel_start_ptr) {
  // The workspace must be big enough to serve as the hash table.
  CHECK_GE(workspace_bytes, capacity * sizeof(TableEntry<K>));
  TableEntry<K>* table = reinterpret_cast<TableEntry<K>*>(workspace_ptr);
  HashTableUniqueAndPartitionPairs<K, V, IDX, HASH><<<num_blocks, 1024, 0, cuda_stream>>>(
      capacity, num_ids, num_partition, num_partitioned_unique_ids_ptr, table, ids, table_ids,
      partitioned_unique_ids, partitioned_unique_table_ids, inverse_unique_partition_indices,
      need_process_table_ids, is_kernel_start_ptr);
}
// Named sub-buffers carved out of the op's tmp buffer by
// IdShuffleTmpBufferManager; kMaxType is the count, not a real buffer.
enum class IdShuffleBufferType { kTableIds = 0, kWorkspace, kMaxType };
// Carves the op's single tmp_buffer allocation into named, CUDA-aligned
// sub-buffers (see IdShuffleBufferType). Constructed once with ptr == nullptr
// at kernel-registration time purely to compute TotalBufferSize(), and again
// at compute time with the real base pointer.
template<typename K, typename U, typename IDX>
class IdShuffleTmpBufferManager final {
 public:
  OF_DISALLOW_COPY_AND_MOVE(IdShuffleTmpBufferManager);
  IdShuffleTmpBufferManager(void* ptr, const int64_t num_ids, const int64_t parallel_num,
                            bool need_table_ids, bool need_process_table_ids)
      : offset_(0),
        offsets_(static_cast<size_t>(IdShuffleBufferType::kMaxType), -1),
        sizes_(static_cast<size_t>(IdShuffleBufferType::kMaxType)),
        ptr_(ptr) {
    // NOTE(review): num_table_ids is computed but never used in this class.
    const int64_t num_table_ids = need_process_table_ids ? num_ids : 0;
    // kTableIds holds generated table ids; zero bytes when they are provided.
    const size_t table_ids_bytes = need_table_ids ? num_ids * sizeof(U) : 0;
    AllocBuffer(IdShuffleBufferType::kTableIds, table_ids_bytes);
    // kWorkspace doubles as the open-addressing hash table: one slot per
    // (rank, id) pair.
    const size_t hash_table_capacity = parallel_num * num_ids;
    AllocBuffer(IdShuffleBufferType::kWorkspace, hash_table_capacity * sizeof(TableEntry<K>));
  }
  // Typed pointer to the start of a previously allocated sub-buffer.
  // Requires a real base pointer (not the sizing-only nullptr construction).
  template<typename T = void>
  T* Ptr(IdShuffleBufferType type) {
    CHECK(ptr_ != nullptr);
    int64_t offset = offsets_.at(static_cast<size_t>(type));
    CHECK_NE(offset, -1);
    return reinterpret_cast<T*>(reinterpret_cast<char*>(ptr_) + offset);
  }
  // Requested byte size of a sub-buffer (before alignment padding).
  int64_t Size(IdShuffleBufferType type) { return sizes_.at(static_cast<size_t>(type)); }
  // Total bytes needed for all sub-buffers, including alignment padding.
  size_t TotalBufferSize() const { return offset_; }

 private:
  // Records the sub-buffer's offset/size and advances the aligned cursor.
  void AllocBuffer(IdShuffleBufferType type, size_t size) {
    const size_t type_id = static_cast<size_t>(type);
    CHECK_EQ(offsets_.at(type_id), -1);  // each buffer may be allocated once
    offsets_.at(type_id) = offset_;
    sizes_.at(type_id) = size;
    offset_ += GetCudaAlignedSize(size);
  }
  size_t offset_;                 // running aligned cursor == total size so far
  std::vector<int64_t> offsets_;  // byte offset per buffer type; -1 if unset
  std::vector<int64_t> sizes_;    // requested (unaligned) size per buffer type
  void* ptr_;                     // tmp buffer base; nullptr in sizing mode
};
// Per-kernel persistent state for the P2P id shuffle:
// - pinned host mirrors for the unique-count outputs (written by
//   BarrierAndComputeOut, read by the host after a stream sync)
// - one device buffer per rank (this rank allocates its own; peers' pointers
//   are filled in later by GetPtrs via CUDA IPC) laid out as
//   [num_partitioned_unique | partitioned_unique_ids |
//    partitioned_unique_table_ids | is_kernel_start flags]
// Fix vs. original: the constructor's cudaMalloc/cudaMemset were the only
// unchecked CUDA calls in this file; they are now wrapped in OF_CUDA_CHECK
// for consistency with the destructor and every other call site.
template<typename K, typename U, typename IDX>
class DataShuffleKernelState final : public user_op::OpKernelState {
 public:
  explicit DataShuffleKernelState(user_op::KernelInitContext* ctx)
      : device_index_(-1),
        parallel_desc_(ctx->parallel_desc()),
        parallel_id_(ctx->parallel_ctx().parallel_id()) {
    OF_CUDA_CHECK(cudaGetDevice(&device_index_));
    int64_t parallel_num = parallel_desc_.parallel_num();
    // Pinned host staging for the device-written count outputs.
    OF_CUDA_CHECK(
        cudaMallocHost(&host_num_unique_matrix_, parallel_num * parallel_num * sizeof(IDX)));
    OF_CUDA_CHECK(cudaMallocHost(&host_cur_rank_num_unique_, sizeof(IDX)));
    const std::string& embedding_name = ctx->Attr<std::string>("embedding_name");
    const int64_t parallel_id = parallel_id_;
    embedding_state_ = Singleton<embedding::EmbeddingManager>::Get()->GetEmbeddingState(
        embedding_name, parallel_id);
    const int64_t num_ids = ctx->TensorDesc4ArgNameAndIndex("ids", 0)->shape().elem_cnt();
    // Sub-buffer sizes inside this rank's IPC-shared device buffer.
    num_partitioned_unique_size_ = GetCudaAlignedSize(parallel_num * sizeof(IDX));
    partitioned_unique_ids_size_ = GetCudaAlignedSize(parallel_num * num_ids * sizeof(K));
    partitioned_unique_table_ids_size_ = GetCudaAlignedSize(parallel_num * num_ids * sizeof(U));
    is_kernel_start_size_ = GetCudaAlignedSize(parallel_num * sizeof(int32_t));
    size_t buffer_size = num_partitioned_unique_size_ + partitioned_unique_ids_size_
                         + partitioned_unique_table_ids_size_ + is_kernel_start_size_;
    buffer_ptrs_.resize(parallel_num);
    // Allocate and zero only this rank's slot; peers are mapped in GetPtrs.
    // The zero init matters: barrier epoch flags start at 0.
    OF_CUDA_CHECK(cudaMalloc(&buffer_ptrs_.at(parallel_id), buffer_size));
    OF_CUDA_CHECK(cudaMemset(buffer_ptrs_.at(parallel_id), 0, buffer_size));
  }
  ~DataShuffleKernelState() {
    CudaCurrentDeviceGuard guard(device_index_);
    OF_CUDA_CHECK(cudaFreeHost(host_cur_rank_num_unique_));
    OF_CUDA_CHECK(cudaFreeHost(host_num_unique_matrix_));
    // NOTE(review): only this rank's buffer is freed; peer pointers opened
    // with cudaIpcOpenMemHandle in GetPtrs are never closed here.
    OF_CUDA_CHECK(cudaFree(buffer_ptrs_.at(parallel_id_)));
  }
  std::vector<void*>* BufferPtrs() { return &buffer_ptrs_; }
  IDX* HostNumUniqueMatrix() { return host_num_unique_matrix_; }
  IDX* HostCurRankNumUnique() { return host_cur_rank_num_unique_; }
  embedding::EmbeddingState* EmbeddingState() { return embedding_state_; }
  // Accessors into rank `parallel_id`'s buffer; valid for peers only after
  // GetPtrs has populated buffer_ptrs_.
  IDX* NumPartitionedUnique(int64_t parallel_id) {
    return reinterpret_cast<IDX*>(buffer_ptrs_.at(parallel_id));
  }
  K* PartitionedUniqueIds(int64_t parallel_id) {
    return reinterpret_cast<K*>(reinterpret_cast<char*>(buffer_ptrs_.at(parallel_id))
                                + num_partitioned_unique_size_);
  }
  U* PartitionedUniqueTableIds(int64_t parallel_id) {
    return reinterpret_cast<U*>(reinterpret_cast<char*>(buffer_ptrs_.at(parallel_id))
                                + num_partitioned_unique_size_ + partitioned_unique_ids_size_);
  }
  int32_t* IsKernelStart(int64_t parallel_id) {
    return reinterpret_cast<int32_t*>(reinterpret_cast<char*>(buffer_ptrs_.at(parallel_id))
                                      + num_partitioned_unique_size_ + partitioned_unique_ids_size_
                                      + partitioned_unique_table_ids_size_);
  }

 private:
  int device_index_;
  ParallelDesc parallel_desc_;
  int64_t parallel_id_;
  IDX* host_num_unique_matrix_;    // pinned, parallel_num x parallel_num
  IDX* host_cur_rank_num_unique_;  // pinned, single count
  std::vector<void*> buffer_ptrs_; // one device buffer per rank (IPC-mapped)
  size_t num_partitioned_unique_size_;
  size_t partitioned_unique_ids_size_;
  size_t partitioned_unique_table_ids_size_;
  size_t is_kernel_start_size_;
  embedding::EmbeddingState* embedding_state_;
};
// One-time, all-ranks exchange of device buffer pointers via CUDA IPC.
// Each rank publishes an IPC handle for its own buffer through the control
// plane (CtrlClient key-value store, keyed by op name + rank), then pulls and
// opens every peer's handle so kernels on this device can read/write peer
// buffers directly. On return, buffer_ptrs[i] is valid for every rank i.
// NOTE(review): opened handles are never closed with cudaIpcCloseMemHandle
// in the visible code — acceptable only if the buffers live for the process
// lifetime.
void GetPtrs(user_op::KernelComputeContext* ctx, std::vector<void*>* buffer_ptrs) {
  const int64_t parallel_id = ctx->parallel_ctx().parallel_id();
  const int64_t parallel_num = ctx->parallel_ctx().parallel_num();
  std::string name = ctx->op_name();
  cudaIpcMemHandle_t handle;
  OF_CUDA_CHECK(cudaIpcGetMemHandle(&handle, buffer_ptrs->at(parallel_id)));
  // Publish this rank's handle as raw bytes.
  Singleton<CtrlClient>::Get()->PushKV(
      name + std::to_string(parallel_id),
      std::string(reinterpret_cast<const char*>(&handle), sizeof(cudaIpcMemHandle_t)));
  for (int64_t i = 0; i < parallel_num; ++i) {
    std::string key = name + std::to_string(i);
    if (parallel_id != i) {
      // PullKV blocks until rank i has pushed its handle.
      cudaIpcMemHandle_t handle;
      Singleton<CtrlClient>::Get()->PullKV(key, [&handle](const std::string& val) {
        memcpy(&handle, val.data(), sizeof(cudaIpcMemHandle_t));
      });
      OF_CUDA_CHECK(
          cudaIpcOpenMemHandle(&buffer_ptrs->at(i), handle, cudaIpcMemLazyEnablePeerAccess));
    }
  }
}
// Final fused kernel: cross-rank barrier + output fixup + host staging.
// - Rewrites inverse_ptr entries from the stage-1 encoding
//   (partition_id * num_ids + slot) into compacted offsets
//   (sum of earlier partitions' unique counts + slot).
// - Copies num_unique_matrix and cur_rank_num_unique from device memory into
//   their pinned host mirrors so the host can read them after a stream sync.
// The entry/exit epoch-flag handshake mirrors BarrierAndMemset; the exit spin
// ensures no rank leaves before all ranks finished reading peer buffers.
template<typename K, typename V, typename IDX, int N>
__global__ void BarrierAndComputeOut(int32_t parallel_id, int32_t parallel_num, int32_t num_ids,
                                     Param<K, V, IDX, N> param, IDX* num_partitioned_unique,
                                     IDX* inverse_ptr, IDX* num_unique_matrix,
                                     IDX* host_num_unique_matrix, IDX* cur_rank_num_unique,
                                     IDX* host_cur_rank_num_unique) {
  int count;
  if (blockIdx.x == 0) {
    // Announce this rank's new epoch to every peer slot in our flag array.
    count = param.is_kernel_start[parallel_id][parallel_id];
    if (threadIdx.x < parallel_num) {
      volatile int32_t* start_f = param.is_kernel_start[parallel_id];
      start_f[threadIdx.x] = count + 1;
    }
  }
  if (parallel_num > 1) {
    CUDA_1D_KERNEL_LOOP(i, num_ids) {
      // Decode (partition, slot) from the stage-1 encoding, then re-base the
      // slot onto the prefix sum of the preceding partitions' unique counts.
      int inverse_indice = inverse_ptr[i];
      int partition_id = inverse_indice / num_ids;
      int partition_indice = inverse_indice - partition_id * num_ids;
      int new_offset = 0;
      for (int k = 0; k < partition_id; ++k) { new_offset += num_partitioned_unique[k]; }
      inverse_ptr[i] = new_offset + partition_indice;
    }
  }
  // Stage the count outputs into pinned host memory (one element per thread).
  int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (global_thread_id < parallel_num * parallel_num) {
    host_num_unique_matrix[global_thread_id] = num_unique_matrix[global_thread_id];
  }
  if (global_thread_id == 0) {
    host_cur_rank_num_unique[global_thread_id] = cur_rank_num_unique[global_thread_id];
  }
  if (blockIdx.x == 0) {
    if (threadIdx.x < parallel_num) {
      // Spin until every peer rank has also reached this epoch.
      volatile int32_t* remote_start_f = param.is_kernel_start[threadIdx.x];
      while (remote_start_f[parallel_id] < count + 1) {}
    }
  }
}
} // namespace
// P2P variant of the id_shuffle kernel: instead of collective communication,
// every rank writes its partitioned unique ids directly into IPC-mapped peer
// buffers, and fused barrier kernels (BarrierAndMemset / BarrierAndComputeOut)
// synchronize the ranks on-device. Selected only when the env var
// ONEFLOW_ONE_EMBEDDING_ID_SHUFFLE_USE_P2P is set (see registration macro).
// K = id type, U = table-id type, IDX = index/count type.
template<typename K, typename U, typename IDX>
class IdShuffleP2PKernel final : public user_op::OpKernel {
 public:
  IdShuffleP2PKernel() : current_iter_(0){};
  ~IdShuffleP2PKernel() override = default;

  std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
      user_op::KernelInitContext* ctx) const override {
    return std::make_shared<DataShuffleKernelState<K, U, IDX>>(ctx);
  }

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state,
               const user_op::OpKernelCache*) const override {
    auto* kernel_state = dynamic_cast<DataShuffleKernelState<K, U, IDX>*>(state);
    CHECK(kernel_state != nullptr);
    // Gather op inputs/outputs and attributes.
    const user_op::Tensor* ids = ctx->Tensor4ArgNameAndIndex("ids", 0);
    user_op::Tensor* num_unique_matrix = ctx->Tensor4ArgNameAndIndex("num_unique_matrix", 0);
    user_op::Tensor* inverse_unique_partition_indices =
        ctx->Tensor4ArgNameAndIndex("inverse_unique_partition_indices", 0);
    user_op::Tensor* cur_rank_num_unique = ctx->Tensor4ArgNameAndIndex("cur_rank_num_unique", 0);
    user_op::Tensor* cur_rank_unique_ids = ctx->Tensor4ArgNameAndIndex("cur_rank_unique_ids", 0);
    user_op::Tensor* cur_rank_unique_table_ids =
        ctx->Tensor4ArgNameAndIndex("cur_rank_unique_table_ids", 0);
    user_op::Tensor* cur_rank_inverse_indices =
        ctx->Tensor4ArgNameAndIndex("cur_rank_inverse_indices", 0);
    user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
    const int32_t num_tables = ctx->Attr<int32_t>("num_tables");
    const bool has_table_ids = ctx->has_input("table_ids", 0);
    // With multiple tables but no explicit table_ids input, synthesize them.
    const bool need_gen_table_ids = (!has_table_ids && num_tables > 1);
    const bool need_process_table_ids = (has_table_ids || num_tables > 1);
    const int64_t num_ids = ids->shape_view().elem_cnt();
    const int64_t parallel_num = ctx->parallel_ctx().parallel_num();
    const int64_t parallel_id = ctx->parallel_ctx().parallel_id();
    cudaStream_t cuda_stream = ctx->stream()->As<ep::CudaStream>()->cuda_stream();
    IdShuffleTmpBufferManager<K, U, IDX> buffer_manager(
        tmp_buffer->mut_dptr(), num_ids, parallel_num, need_gen_table_ids, need_process_table_ids);
    CHECK_GE(tmp_buffer->shape_view().elem_cnt(), buffer_manager.TotalBufferSize());
    // First iteration only: exchange IPC handles so every rank's buffer is
    // mapped into this process (see GetPtrs).
    if (current_iter_ == 0) { GetPtrs(ctx, kernel_state->BufferPtrs()); }
    const int num_blocks =
        2 * ctx->stream()->As<ep::CudaStream>()->device_properties().multiProcessorCount;
    IDX* num_partitioned_unique = kernel_state->NumPartitionedUnique(parallel_id);
    K* partitioned_unique_ids = kernel_state->PartitionedUniqueIds(parallel_id);
    U* partitioned_unique_table_ids = kernel_state->PartitionedUniqueTableIds(parallel_id);
    IDX* num_unique_matrix_ptr = reinterpret_cast<IDX*>(num_unique_matrix->mut_dptr());
    size_t hash_table_capacity = parallel_num * num_ids;
    void* workspace_ptr = buffer_manager.Ptr(IdShuffleBufferType::kWorkspace);
    size_t workspace_size = buffer_manager.Size(IdShuffleBufferType::kWorkspace);
    const U* table_ids_ptr;
    bool skip_memset = false;
    // Resolve table ids: given, generated on-device, or absent. The generator
    // kernel also clears the workspace/counters, so the memset can be skipped.
    if (has_table_ids) {
      const user_op::Tensor* table_ids = ctx->Tensor4ArgNameAndIndex("table_ids", 0);
      table_ids_ptr = reinterpret_cast<const U*>(table_ids->dptr());
    } else if (need_gen_table_ids) {
      // 16-byte packs require size and pointer alignment.
      CHECK_EQ(workspace_size % 16, 0);
      CHECK_EQ(reinterpret_cast<std::uintptr_t>(workspace_ptr) % 16, 0);
      GenerateTableIdsAndMemsetUniqueWorkspace<U, IDX, 16><<<num_blocks, 1024, 0, cuda_stream>>>(
          num_ids, num_tables, buffer_manager.template Ptr<U>(IdShuffleBufferType::kTableIds),
          reinterpret_cast<Pack<char, 16>*>(workspace_ptr), workspace_size / 16,
          num_partitioned_unique, parallel_num);
      table_ids_ptr = buffer_manager.template Ptr<U>(IdShuffleBufferType::kTableIds);
      skip_memset = true;
    } else {
      table_ids_ptr = nullptr;
    }
    if (!skip_memset) {
      // Hash table and per-partition counters must start zeroed.
      OF_CUDA_CHECK(cudaMemsetAsync(workspace_ptr, 0, workspace_size, cuda_stream));
      OF_CUDA_CHECK(
          cudaMemsetAsync(num_partitioned_unique, 0, parallel_num * sizeof(IDX), cuda_stream));
    }
    // Stage 1: dedup local ids and scatter them into per-destination-rank
    // partitions inside this rank's IPC-shared buffer.
    UniqueAndPartition<K, U, IDX, embedding::ShardingHash>(
        cuda_stream, num_blocks, num_ids, hash_table_capacity, parallel_num,
        reinterpret_cast<const K*>(ids->dptr()), table_ids_ptr, num_partitioned_unique,
        partitioned_unique_ids, partitioned_unique_table_ids,
        reinterpret_cast<IDX*>(inverse_unique_partition_indices->mut_dptr()), workspace_ptr,
        workspace_size, need_process_table_ids, kernel_state->IsKernelStart(parallel_id));
    IDX* cur_rank_num_unique_ids_ptr = reinterpret_cast<IDX*>(cur_rank_num_unique->mut_dptr());
    // Collect every rank's buffer pointers; unique_ids[i] points at rank i's
    // partition destined for this rank.
    Param<K, U, IDX, 8> param;
    CHECK_LE(parallel_num, 8);
    for (int i = 0; i < parallel_num; ++i) {
      param.num_unique[i] = kernel_state->NumPartitionedUnique(i);
      param.unique_ids[i] = kernel_state->PartitionedUniqueIds(i) + parallel_id * num_ids;
      param.unique_table_ids[i] =
          kernel_state->PartitionedUniqueTableIds(i) + parallel_id * num_ids;
      param.is_kernel_start[i] = kernel_state->IsKernelStart(i);
    }
    param.num_unique_matrix = num_unique_matrix_ptr;
    CHECK_EQ(workspace_size % 16, 0);
    CHECK_EQ(reinterpret_cast<std::uintptr_t>(workspace_ptr) % 16, 0);
    int workspace_num_pack = workspace_size / 16;
    // Stage 2: barrier with peers, then re-clear the workspace and the
    // rank-local unique counter for the second dedup pass.
    BarrierAndMemset<<<num_blocks, 1024, 0, cuda_stream>>>(
        parallel_id, parallel_num, param, reinterpret_cast<Pack<char, 16>*>(workspace_ptr),
        workspace_num_pack, cur_rank_num_unique_ids_ptr, 1);
    // Stage 3: dedup the union of all partitions destined for this rank.
    HashTableUniquePairs<K, U, IDX, embedding::LocalUniqueHash>
        <<<num_blocks, 1024, 0, cuda_stream>>>(
            hash_table_capacity, num_ids, parallel_num, parallel_id, cur_rank_num_unique_ids_ptr,
            reinterpret_cast<TableEntry<K>*>(workspace_ptr), param,
            reinterpret_cast<K*>(cur_rank_unique_ids->mut_dptr()),
            reinterpret_cast<U*>(cur_rank_unique_table_ids->mut_dptr()),
            reinterpret_cast<IDX*>(cur_rank_inverse_indices->mut_dptr()), need_process_table_ids);
    IDX* host_num_unique_matrix = kernel_state->HostNumUniqueMatrix();
    IDX* host_cur_rank_num_unique = kernel_state->HostCurRankNumUnique();
    // Stage 4: final barrier, rewrite the partition inverse indices, and
    // stage the count outputs into pinned host memory.
    BarrierAndComputeOut<<<num_blocks, 1024, 0, cuda_stream>>>(
        parallel_id, parallel_num, num_ids, param, num_partitioned_unique,
        reinterpret_cast<IDX*>(inverse_unique_partition_indices->mut_dptr()), num_unique_matrix_ptr,
        host_num_unique_matrix, cur_rank_num_unique_ids_ptr, host_cur_rank_num_unique);
    if (!need_process_table_ids) {
      // No table-id processing: the output tensor would otherwise be garbage.
      OF_CUDA_CHECK(cudaMemsetAsync(cur_rank_unique_table_ids->mut_dptr(), 0,
                                    cur_rank_unique_table_ids->shape_view().elem_cnt() * sizeof(U),
                                    cuda_stream));
    }
    embedding::EmbeddingState* embedding_state = kernel_state->EmbeddingState();
    std::vector<uint32_t> num_unique_matrix_vec(parallel_num * parallel_num);
    // Block until the staged host mirrors are valid, then publish the counts
    // to the embedding state for downstream kernels of this iteration.
    CHECK_JUST(ctx->stream()->Sync());
    std::memcpy(num_unique_matrix_vec.data(), host_num_unique_matrix,
                parallel_num * parallel_num * sizeof(IDX));
    CHECK_EQ(sizeof(IDX), sizeof(uint32_t)) << "assume sizeof(IDX) equals to sizeof(uint32_t)";
    embedding_state->SetIdNumUniqueMatrix(num_unique_matrix_vec, current_iter_);
    uint32_t final_num_unique = *host_cur_rank_num_unique;
    embedding_state->SetIdFinalNumUnique(final_num_unique, current_iter_);
    current_iter_++;
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
  // Compute() is const; the iteration counter drives one-time IPC setup and
  // versions the embedding-state updates.
  mutable int64_t current_iter_;
};
// Supported (C++ type, DataType enum) pairs for the "ids" input tensor.
#define ID_DATA_TYPE_SEQ                            \
  OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32) \
  OF_PP_MAKE_TUPLE_SEQ(uint64_t, DataType::kUInt64) \
  OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)   \
  OF_PP_MAKE_TUPLE_SEQ(int64_t, DataType::kInt64)

// Supported pairs for table-id tensors ("table_ids" / "cur_rank_unique_table_ids").
#define TABLE_ID_DATA_TYPE_SEQ                      \
  OF_PP_MAKE_TUPLE_SEQ(uint8_t, DataType::kUInt8)   \
  OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32) \
  OF_PP_MAKE_TUPLE_SEQ(uint64_t, DataType::kUInt64) \
  OF_PP_MAKE_TUPLE_SEQ(int8_t, DataType::kInt8)     \
  OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)   \
  OF_PP_MAKE_TUPLE_SEQ(int64_t, DataType::kInt64)

// Supported pairs for index/count tensors ("num_unique_matrix" etc.).
#define IDX_DATA_TYPE_SEQ                           \
  OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32) \
  OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
// Registers IdShuffleP2PKernel for one (id, table-id, idx) dtype triple.
// The kernel only matches when the env var ONEFLOW_ONE_EMBEDDING_ID_SHUFFLE_USE_P2P
// is set; otherwise the default (non-P2P) id_shuffle kernel is used. The tmp-size
// function sizes the buffer with a null-based IdShuffleTmpBufferManager.
#define REGISTER_CUDA_ID_SHUFFLE_P2P_KERNEL(k_dtype_pair, table_id_dtype_pair, idx_dtype_pair)   \
  REGISTER_USER_KERNEL("id_shuffle")                                                             \
      .SetCreateFn<IdShuffleP2PKernel<OF_PP_PAIR_FIRST(k_dtype_pair),                            \
                                      OF_PP_PAIR_FIRST(table_id_dtype_pair),                     \
                                      OF_PP_PAIR_FIRST(idx_dtype_pair)>>()                       \
      .SetIsMatchedHob(                                                                          \
          (user_op::HobDeviceType() == DeviceType::kCUDA)                                        \
          && (user_op::HobDataType("ids", 0) == OF_PP_PAIR_SECOND(k_dtype_pair))                 \
          && (user_op::HobDataType("cur_rank_unique_table_ids", 0)                               \
              == OF_PP_PAIR_SECOND(table_id_dtype_pair))                                         \
          && (user_op::HobDataType("num_unique_matrix", 0) == OF_PP_PAIR_SECOND(idx_dtype_pair)) \
          && ParseBooleanFromEnv("ONEFLOW_ONE_EMBEDDING_ID_SHUFFLE_USE_P2P", false))             \
      .SetInferTmpSizeFn([](user_op::InferContext* ctx) {                                        \
        const user_op::TensorDesc& ids = ctx->InputTensorDesc("ids", 0);                         \
        const bool has_table_ids = ctx->has_input("table_ids", 0);                               \
        const int32_t num_tables = ctx->Attr<int32_t>("num_tables");                             \
        const bool need_gen_table_ids = (!has_table_ids && num_tables > 1);                      \
        const bool need_process_table_ids = (has_table_ids || num_tables > 1);                   \
        IdShuffleTmpBufferManager<OF_PP_PAIR_FIRST(k_dtype_pair),                                \
                                  OF_PP_PAIR_FIRST(table_id_dtype_pair),                         \
                                  OF_PP_PAIR_FIRST(idx_dtype_pair)>                              \
            buffer_manager(nullptr, ids.shape().elem_cnt(), ctx->parallel_desc().parallel_num(), \
                           need_gen_table_ids, need_process_table_ids);                          \
        return buffer_manager.TotalBufferSize();                                                 \
      });

// Instantiate the registration for the cartesian product of supported dtypes.
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_CUDA_ID_SHUFFLE_P2P_KERNEL, ID_DATA_TYPE_SEQ,
                                 TABLE_ID_DATA_TYPE_SEQ, IDX_DATA_TYPE_SEQ)
} // namespace oneflow
|
afd74952694bcb6afd12f9958f907eb8ab4f5eb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Elementwise b = fmax(a, alpha * a) over an sd x fd matrix stored with
// leading dimensions ld_a / ld_b and base offsets offset_a / offset_b.
// For alpha in [0, 1] this computes a leaky ReLU (slope alpha for negative
// inputs) — assumption to confirm at call sites. REAL defaults to float and
// CAST(fmax) selects fmaxf (see the macro block above). One thread per
// element on a 2D grid.
__global__ void ge_relu (const int sd, const int fd, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
  const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
  const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard: the grid may overhang when dims are not multiples of the block.
  const bool valid = (gid_0 < sd) && (gid_1 < fd);
  if (valid) {
    const REAL val = a[offset_a + gid_0 + gid_1 * ld_a];
    b[offset_b + gid_0 + gid_1 * ld_b] = CAST(fmax)(val, alpha * val);
  }
} | afd74952694bcb6afd12f9958f907eb8ab4f5eb7.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Elementwise b = fmax(a, alpha * a) over an sd x fd matrix stored with
// leading dimensions ld_a / ld_b and base offsets offset_a / offset_b.
// For alpha in [0, 1] this computes a leaky ReLU (slope alpha for negative
// inputs) — assumption to confirm at call sites. REAL defaults to float and
// CAST(fmax) selects fmaxf (see the macro block above). One thread per
// element on a 2D grid.
__global__ void ge_relu (const int sd, const int fd, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
  const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
  const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard: the grid may overhang when dims are not multiples of the block.
  const bool valid = (gid_0 < sd) && (gid_1 < fd);
  if (valid) {
    const REAL val = a[offset_a + gid_0 + gid_1 * ld_a];
    b[offset_b + gid_0 + gid_1 * ld_b] = CAST(fmax)(val, alpha * val);
  }
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.